rcsb-embedding-model 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

rcsb_embedding_model/cli/args_utils.py (new file)
@@ -0,0 +1,9 @@
+
+
+ def arg_devices(devices):
+     if len(devices) == 1:
+         return devices[0] if devices[0] == "auto" else int(devices[0])
+     return [int(x) for x in devices]
+
+
+
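
For reference, the helper above turns the repeated `--devices` CLI option into what `lightning.Trainer` accepts; a quick illustration of its behavior as defined above:

```python
from rcsb_embedding_model.cli.args_utils import arg_devices

arg_devices(["auto"])    # -> "auto"   (automatic device selection)
arg_devices(["2"])       # -> 2        (single device, as int)
arg_devices(["0", "1"])  # -> [0, 1]   (explicit list of device indices)
```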

rcsb_embedding_model/cli/inference.py (new file)
@@ -0,0 +1,175 @@
+ from typing import Annotated, List
+
+ import typer
+
+ from rcsb_embedding_model.cli.args_utils import arg_devices
+ from rcsb_embedding_model.types.api_types import SrcFormat, Accelerator, SrcLocation
+
+ app = typer.Typer(
+     add_completion=False
+ )
+
+
+ @app.command(
+     name="residue-embedding",
+     help="Calculate residue level embeddings of protein structures using ESM3."
+ )
+ def residue_embedding(
+         src_file: Annotated[typer.FileText, typer.Option(
+             exists=True,
+             file_okay=True,
+             dir_okay=False,
+             resolve_path=True,
+             help='CSV file 3 columns: Structure File Path | Chain Id (asym_id for cif files) | Output file name.'
+         )],
+         output_path: Annotated[typer.FileText, typer.Option(
+             exists=True,
+             file_okay=False,
+             dir_okay=True,
+             resolve_path=True,
+             help='Output path to store predictions.'
+         )],
+         src_location: Annotated[SrcLocation, typer.Option(
+             help='Source input location.'
+         )] = SrcLocation.local,
+         src_format: Annotated[SrcFormat, typer.Option(
+             help='Structure file format.'
+         )] = SrcFormat.mmcif,
+         batch_size: Annotated[int, typer.Option(
+             help='Number of samples processed together in one iteration.'
+         )] = 1,
+         num_workers: Annotated[int, typer.Option(
+             help='Number of subprocesses to use for data loading.'
+         )] = 0,
+         num_nodes: Annotated[int, typer.Option(
+             help='Number of nodes to use for inference.'
+         )] = 1,
+         accelerator: Annotated[Accelerator, typer.Option(
+             help='Device used for inference.'
+         )] = Accelerator.auto,
+         devices: Annotated[List[str], typer.Option(
+             help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
+         )] = tuple(['auto'])
+ ):
+     from rcsb_embedding_model.inference.esm_inference import predict
+     predict(
+         csv_file=src_file,
+         src_location=src_location,
+         src_format=src_format,
+         batch_size=batch_size,
+         num_workers=num_workers,
+         num_nodes=num_nodes,
+         accelerator=accelerator,
+         devices=arg_devices(devices),
+         out_path=output_path
+     )
+
+
+ @app.command(
+     name="structure-embedding",
+     help="Calculate single-chain protein embeddings from structural files. Predictions are stored in a single pandas data-frame file."
+ )
+ def structure_embedding(
+         src_file: Annotated[typer.FileText, typer.Option(
+             exists=True,
+             file_okay=True,
+             dir_okay=False,
+             resolve_path=True,
+             help='CSV file 3 columns: Structure File Path | Chain Id (asym_id for cif files) | Output file name.'
+         )],
+         output_path: Annotated[typer.FileText, typer.Option(
+             exists=True,
+             file_okay=False,
+             dir_okay=True,
+             resolve_path=True,
+             help='Output path to store predictions.'
+         )],
+         out_df_id: Annotated[str, typer.Option(
+             help='File name to store predicted embeddings.'
+         )],
+         src_location: Annotated[SrcLocation, typer.Option(
+             help='Source input location.'
+         )] = SrcLocation.local,
+         src_format: Annotated[SrcFormat, typer.Option(
+             help='Structure file format.'
+         )] = SrcFormat.mmcif,
+         batch_size: Annotated[int, typer.Option(
+             help='Number of samples processed together in one iteration.'
+         )] = 1,
+         num_workers: Annotated[int, typer.Option(
+             help='Number of subprocesses to use for data loading.'
+         )] = 0,
+         num_nodes: Annotated[int, typer.Option(
+             help='Number of nodes to use for inference.'
+         )] = 1,
+         accelerator: Annotated[Accelerator, typer.Option(
+             help='Device used for inference.'
+         )] = Accelerator.auto,
+         devices: Annotated[List[str], typer.Option(
+             help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
+         )] = tuple(['auto'])
+ ):
+     from rcsb_embedding_model.inference.structure_inference import predict
+     predict(
+         csv_file=src_file,
+         src_location=src_location,
+         src_format=src_format,
+         batch_size=batch_size,
+         num_workers=num_workers,
+         num_nodes=num_nodes,
+         accelerator=accelerator,
+         devices=arg_devices(devices),
+         out_path=output_path,
+         out_df_id=out_df_id
+     )
+
+
+ @app.command(
+     name="chain-embedding",
+     help="Calculate single-chain protein embeddings from residue level embeddings stored as torch tensor files."
+ )
+ def chain_embedding(
+         src_file: Annotated[typer.FileText, typer.Option(
+             exists=True,
+             file_okay=True,
+             dir_okay=False,
+             resolve_path=True,
+             help='CSV file 2 columns: Residue Embedding Tensor File | Output file name.'
+         )],
+         output_path: Annotated[typer.FileText, typer.Option(
+             exists=True,
+             file_okay=False,
+             dir_okay=True,
+             resolve_path=True,
+             help='Output path to store predictions.'
+         )],
+         batch_size: Annotated[int, typer.Option(
+             help='Number of samples processed together in one iteration.'
+         )] = 1,
+         num_workers: Annotated[int, typer.Option(
+             help='Number of subprocesses to use for data loading.'
+         )] = 0,
+         num_nodes: Annotated[int, typer.Option(
+             help='Number of nodes to use for inference.'
+         )] = 1,
+         accelerator: Annotated[Accelerator, typer.Option(
+             help='Device used for inference.'
+         )] = Accelerator.auto,
+         devices: Annotated[List[str], typer.Option(
+             help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
+         )] = tuple(['auto'])
+ ):
+     from rcsb_embedding_model.inference.chain_inference import predict
+     predict(
+         csv_file=src_file,
+         batch_size=batch_size,
+         num_workers=num_workers,
+         num_nodes=num_nodes,
+         accelerator=accelerator,
+         devices=arg_devices(devices),
+         out_path=output_path
+     )
+
+
+ if __name__ == "__main__":
+     app()
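
Combined with the `inference` console script declared in entry_points.txt (shown later in this diff), a typical invocation of the first command might look like the following sketch; the CSV and output directory names are illustrative, not part of the package:

```
inference residue-embedding \
    --src-file chains.csv \
    --output-path out \
    --src-format mmcif \
    --batch-size 4 \
    --devices 0 --devices 1
```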

rcsb_embedding_model/dataset/esm_prot_from_csv.py (new file)
@@ -0,0 +1,90 @@
+ import argparse
+
+ import torch
+ from biotite.structure import chain_iter
+ from esm.models.esm3 import ESM3
+ from esm.sdk.api import ESMProtein, SamplingConfig
+ from esm.utils.constants.models import ESM3_OPEN_SMALL
+ from esm.utils.structure.protein_chain import ProteinChain
+ from torch.utils.data import Dataset, DataLoader
+ import pandas as pd
+
+ from rcsb_embedding_model.types.api_types import SrcFormat, SrcLocation
+ from rcsb_embedding_model.utils.data import stringio_from_url
+ from rcsb_embedding_model.utils.structure_parser import get_structure_from_src
+
+
+ class EsmProtFromCsv(Dataset):
+
+     MIN_RES = 10
+     STREAM_ATTR = 'stream'
+     CH_ATTR = 'chain_id'
+     NAME_ATTR = 'name'
+
+     COLUMNS = [STREAM_ATTR, CH_ATTR, NAME_ATTR]
+
+     def __init__(
+             self,
+             csv_file,
+             src_location=SrcLocation.local,
+             src_format=SrcFormat.mmcif,
+     ):
+         super().__init__()
+         self.src_location = src_location
+         self.src_format = src_format
+         self.data = pd.DataFrame()
+         self.__load_stream(csv_file)
+
+     def __load_stream(self, stream_list):
+         self.data = pd.read_csv(
+             stream_list,
+             header=None,
+             index_col=None,
+             names=EsmProtFromCsv.COLUMNS
+         )
+
+     def __len__(self):
+         return len(self.data)
+
+     def __getitem__(self, idx):
+         src_structure = self.data.loc[idx, EsmProtFromCsv.STREAM_ATTR]
+         chain_id = self.data.loc[idx, EsmProtFromCsv.CH_ATTR]
+         name = self.data.loc[idx, EsmProtFromCsv.NAME_ATTR]
+         structure = get_structure_from_src(
+             src_structure=src_structure if self.src_location == SrcLocation.local else stringio_from_url(src_structure),
+             src_format=self.src_format,
+             chain_id=chain_id
+         )
+         for atom_ch in chain_iter(structure):
+             protein_chain = ProteinChain.from_atomarray(atom_ch)
+             return ESMProtein.from_protein_chain(protein_chain), name
+
+
+ if __name__ == '__main__':
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--file_list', type=argparse.FileType('r'), required=True)
+     args = parser.parse_args()
+
+     dataset = EsmProtFromCsv(
+         args.file_list
+     )
+
+     esm3 = ESM3.from_pretrained(
+         ESM3_OPEN_SMALL,
+         torch.device("cpu")
+     )
+
+     dataloader = DataLoader(
+         dataset,
+         batch_size=2,
+         collate_fn=lambda _: _
+     )
+
+     for _batch in dataloader:
+         for esm_prot, name in _batch:
+             protein_tensor = esm3.encode(esm_prot)
+             embeddings = esm3.forward_and_sample(
+                 protein_tensor, SamplingConfig(return_per_residue_embeddings=True)
+             ).per_residue_embedding
+             print(name, embeddings.shape)
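
`EsmProtFromCsv` reads a headerless, three-column CSV (structure path, chain id, output name). A hypothetical input file, with made-up paths, would look like:

```
/data/structures/101m.cif,A,101m.A
/data/structures/1abc.cif,B,1abc.B
```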

rcsb_embedding_model/dataset/residue_embedding_from_csv.py (new file)
@@ -0,0 +1,32 @@
+ import pandas as pd
+ import torch
+ from torch.utils.data import Dataset
+
+
+ class ResidueEmbeddingFromCSV(Dataset):
+
+     STREAM_ATTR = 'stream'
+     NAME_ATTR = 'name'
+
+     COLUMNS = [STREAM_ATTR, NAME_ATTR]
+
+     def __init__(self, csv_file):
+         super().__init__()
+         self.data = pd.DataFrame()
+         self.__load_stream(csv_file)
+
+     def __load_stream(self, csv_file):
+         self.data = pd.read_csv(
+             csv_file,
+             header=None,
+             index_col=None,
+             names=ResidueEmbeddingFromCSV.COLUMNS
+         )
+
+     def __len__(self):
+         return len(self.data)
+
+     def __getitem__(self, idx):
+         embedding_src = self.data.loc[idx, ResidueEmbeddingFromCSV.STREAM_ATTR]
+         name = self.data.loc[idx, ResidueEmbeddingFromCSV.NAME_ATTR]
+         return torch.load(embedding_src, map_location=torch.device('cpu')), name
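
Analogously, `ResidueEmbeddingFromCSV` expects a headerless, two-column CSV pairing a saved tensor file with a name; a made-up example:

```
/data/residue_embeddings/101m.A.pt,101m.A
/data/residue_embeddings/1abc.B.pt,1abc.B
```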

rcsb_embedding_model/inference/chain_inference.py (new file)
@@ -0,0 +1,50 @@
+ from torch.utils.data import DataLoader
+ from lightning import Trainer
+ from typer import FileText
+
+ from rcsb_embedding_model.dataset.residue_embedding_from_csv import ResidueEmbeddingFromCSV
+ from rcsb_embedding_model.modules.chain_module import ChainModule
+ from rcsb_embedding_model.types.api_types import Accelerator, Devices, OptionalPath
+ from rcsb_embedding_model.utils.data import collate_seq_embeddings
+ from rcsb_embedding_model.writer.batch_writer import CsvBatchWriter
+
+
+ def predict(
+         csv_file: FileText,
+         batch_size: int = 1,
+         num_workers: int = 0,
+         num_nodes: int = 1,
+         accelerator: Accelerator = Accelerator.auto,
+         devices: Devices = 'auto',
+         out_path: OptionalPath = None
+ ):
+     inference_set = ResidueEmbeddingFromCSV(
+         csv_file=csv_file
+     )
+
+     inference_dataloader = DataLoader(
+         dataset=inference_set,
+         batch_size=batch_size,
+         num_workers=num_workers,
+         collate_fn=lambda emb: (
+             collate_seq_embeddings([x for x, z in emb]),
+             tuple([z for x, z in emb])
+         )
+     )
+
+     module = ChainModule()
+
+     inference_writer = CsvBatchWriter(out_path) if out_path is not None else None
+     trainer = Trainer(
+         callbacks=[inference_writer] if inference_writer is not None else None,
+         num_nodes=num_nodes,
+         accelerator=accelerator,
+         devices=devices
+     )
+
+     prediction = trainer.predict(
+         module,
+         inference_dataloader
+     )
+
+     return prediction
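
This `predict` can also be driven from Python rather than the CLI. A minimal sketch, assuming a two-column CSV as above and an existing `out` directory (both names are illustrative):

```python
from rcsb_embedding_model.inference.chain_inference import predict

# Writes one <name>.csv per input row into out/ via CsvBatchWriter;
# accelerator/devices are forwarded unchanged to lightning.Trainer.
predict(
    csv_file="tensors.csv",
    batch_size=4,
    accelerator="auto",
    devices="auto",
    out_path="out"
)
```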

rcsb_embedding_model/inference/esm_inference.py (new file)
@@ -0,0 +1,50 @@
+ from torch.utils.data import DataLoader
+ from lightning import Trainer
+ from typer import FileText
+
+ from rcsb_embedding_model.dataset.esm_prot_from_csv import EsmProtFromCsv
+ from rcsb_embedding_model.modules.esm_module import EsmModule
+ from rcsb_embedding_model.types.api_types import SrcFormat, Accelerator, Devices, OptionalPath, SrcLocation
+ from rcsb_embedding_model.writer.batch_writer import TensorBatchWriter
+
+
+ def predict(
+         csv_file: FileText,
+         src_location: SrcLocation = SrcLocation.local,
+         src_format: SrcFormat = SrcFormat.mmcif,
+         batch_size: int = 1,
+         num_workers: int = 0,
+         num_nodes: int = 1,
+         accelerator: Accelerator = Accelerator.auto,
+         devices: Devices = 'auto',
+         out_path: OptionalPath = None
+ ):
+
+     inference_set = EsmProtFromCsv(
+         csv_file=csv_file,
+         src_location=src_location,
+         src_format=src_format
+     )
+
+     inference_dataloader = DataLoader(
+         dataset=inference_set,
+         batch_size=batch_size,
+         num_workers=num_workers,
+         collate_fn=lambda _: _
+     )
+
+     module = EsmModule()
+     inference_writer = TensorBatchWriter(out_path) if out_path is not None else None
+     trainer = Trainer(
+         callbacks=[inference_writer] if inference_writer is not None else None,
+         num_nodes=num_nodes,
+         accelerator=accelerator,
+         devices=devices
+     )
+
+     prediction = trainer.predict(
+         module,
+         inference_dataloader
+     )
+
+     return prediction
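
The residue-level entry point follows the same pattern; under the same assumptions (illustrative file names, pre-existing output directory), the `.pt` tensors it writes through `TensorBatchWriter` are exactly what the chain-embedding step consumes:

```python
from rcsb_embedding_model.inference.esm_inference import predict

# structures.csv rows: structure path, chain id, output name (see EsmProtFromCsv)
predict(
    csv_file="structures.csv",
    src_format="mmcif",
    out_path="residue_embeddings"
)
```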

rcsb_embedding_model/inference/structure_inference.py (new file)
@@ -0,0 +1,51 @@
+ from torch.utils.data import DataLoader
+ from lightning import Trainer
+ from typer import FileText
+
+ from rcsb_embedding_model.dataset.esm_prot_from_csv import EsmProtFromCsv
+ from rcsb_embedding_model.modules.esm_module import EsmModule
+ from rcsb_embedding_model.types.api_types import SrcFormat, Accelerator, Devices, OptionalPath, SrcLocation
+ from rcsb_embedding_model.writer.batch_writer import DataFrameStorage
+
+
+ def predict(
+         csv_file: FileText,
+         src_location: SrcLocation = SrcLocation.local,
+         src_format: SrcFormat = SrcFormat.mmcif,
+         batch_size: int = 1,
+         num_workers: int = 0,
+         num_nodes: int = 1,
+         accelerator: Accelerator = Accelerator.auto,
+         devices: Devices = 'auto',
+         out_path: OptionalPath = None,
+         out_df_id: str = None
+ ):
+
+     inference_set = EsmProtFromCsv(
+         csv_file=csv_file,
+         src_location=src_location,
+         src_format=src_format
+     )
+
+     inference_dataloader = DataLoader(
+         dataset=inference_set,
+         batch_size=batch_size,
+         num_workers=num_workers,
+         collate_fn=lambda _: _
+     )
+
+     module = EsmModule()
+     inference_writer = DataFrameStorage(out_path, out_df_id) if out_path is not None and out_df_id is not None else None
+     trainer = Trainer(
+         callbacks=[inference_writer] if inference_writer is not None else None,
+         num_nodes=num_nodes,
+         accelerator=accelerator,
+         devices=devices
+     )
+
+     prediction = trainer.predict(
+         module,
+         inference_dataloader
+     )
+
+     return prediction

rcsb_embedding_model/modules/chain_module.py (new file)
@@ -0,0 +1,16 @@
+ from lightning import LightningModule
+
+ from rcsb_embedding_model.utils.model import get_aggregator_model
+
+
+ class ChainModule(LightningModule):
+
+     def __init__(
+             self
+     ):
+         super().__init__()
+         self.model = get_aggregator_model(device=self.device)
+
+     def predict_step(self, batch, batch_idx):
+         (x, x_mask), dom_id = batch
+         return self.model(x, x_mask), dom_id

rcsb_embedding_model/modules/esm_module.py (new file)
@@ -0,0 +1,24 @@
+ from esm.sdk.api import SamplingConfig
+ from lightning import LightningModule
+
+ from rcsb_embedding_model.utils.model import get_residue_model
+
+
+ class EsmModule(LightningModule):
+
+     def __init__(
+             self
+     ):
+         super().__init__()
+         self.esm3 = get_residue_model(self.device)
+
+     def predict_step(self, prot_batch, batch_idx):
+         prot_embeddings = []
+         prot_names = []
+         for esm_prot, name in prot_batch:
+             embeddings = self.esm3.forward_and_sample(
+                 self.esm3.encode(esm_prot), SamplingConfig(return_per_residue_embeddings=True)
+             ).per_residue_embedding
+             prot_embeddings.append(embeddings)
+             prot_names.append(name)
+         return tuple(prot_embeddings), tuple(prot_names)

rcsb_embedding_model/modules/structure_module.py (new file)
@@ -0,0 +1,27 @@
+ from esm.sdk.api import SamplingConfig
+ from lightning import LightningModule
+
+ from rcsb_embedding_model.utils.data import collate_seq_embeddings
+ from rcsb_embedding_model.utils.model import get_residue_model, get_aggregator_model
+
+
+ class StructureModule(LightningModule):
+
+     def __init__(
+             self
+     ):
+         super().__init__()
+         self.esm3 = get_residue_model(self.device)
+         self.aggregator = get_aggregator_model(device=self.device)
+
+     def predict_step(self, prot_batch, batch_idx):
+         prot_embeddings = []
+         prot_names = []
+         for esm_prot, name in prot_batch:
+             embeddings = self.esm3.forward_and_sample(
+                 self.esm3.encode(esm_prot), SamplingConfig(return_per_residue_embeddings=True)
+             ).per_residue_embedding
+             prot_embeddings.append(embeddings)
+             prot_names.append(name)
+         res_batch_embedding, res_batch_mask = collate_seq_embeddings(prot_embeddings)
+         return self.aggregator(res_batch_embedding, res_batch_mask), tuple(prot_names)

rcsb_embedding_model/rcsb_structure_embedding.py (modified)
@@ -1,55 +1,63 @@
  import torch
  from biotite.structure import get_residues, chain_iter, filter_amino_acids
- from biotite.structure.io.pdb import PDBFile, get_structure as get_pdb_structure, get_assembly as get_pdb_assembly
- from biotite.structure.io.pdbx import CIFFile, get_structure, get_assembly, BinaryCIFFile
- from esm.models.esm3 import ESM3
  from esm.sdk.api import ESMProtein, SamplingConfig
- from esm.utils.constants.models import ESM3_OPEN_SMALL
  from esm.utils.structure.protein_chain import ProteinChain
- from huggingface_hub import hf_hub_download

- from rcsb_embedding_model.model.residue_embedding_aggregator import ResidueEmbeddingAggregator
+ from rcsb_embedding_model.types.api_types import StreamSrc, SrcFormat
+ from rcsb_embedding_model.utils.model import get_aggregator_model, get_residue_model
+ from rcsb_embedding_model.utils.structure_parser import get_structure_from_src


  class RcsbStructureEmbedding:

      MIN_RES = 10
-     REPO_ID = "rcsb/rcsb-embedding-model"
-     FILE_NAME = "rcsb-embedding-model.pt"
-     VERSION = "410606e40b1bb7968ce318c41009355c3ac32503"

      def __init__(self):
          self.__residue_embedding = None
          self.__aggregator_embedding = None

-     def load_models(self, device=None):
+     def load_models(
+             self,
+             device: torch.device = None
+     ):
          self.load_residue_embedding(device)
          self.load_aggregator_embedding(device)

-     def load_residue_embedding(self, device=None):
+     def load_residue_embedding(
+             self,
+             device: torch.device = None
+     ):
          if not device:
              device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
          self.__residue_embedding = _load_res_model(device)

-     def load_aggregator_embedding(self, device=None):
+     def load_aggregator_embedding(
+             self,
+             device: torch.device = None
+     ):
          if not device:
              device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-         self.__aggregator_embedding = _load_model(
-             _download_model(
-                 RcsbStructureEmbedding.REPO_ID,
-                 RcsbStructureEmbedding.FILE_NAME,
-                 RcsbStructureEmbedding.VERSION
-             ),
-             device
-         )
-
-     def structure_embedding(self, structure_src, format="pdb", chain_id=None, assembly_id=None):
-         res_embedding = self.residue_embedding(structure_src, format, chain_id, assembly_id)
+         self.__aggregator_embedding = _load_model(device)
+
+     def structure_embedding(
+             self,
+             src_structure: StreamSrc,
+             src_format: SrcFormat = SrcFormat.mmcif,
+             chain_id: str = None,
+             assembly_id: str = None
+     ):
+         res_embedding = self.residue_embedding(src_structure, src_format, chain_id, assembly_id)
          return self.aggregator_embedding(res_embedding)

-     def residue_embedding(self, structure_src, format="pdb", chain_id=None, assembly_id=None):
+     def residue_embedding(
+             self,
+             src_structure: StreamSrc,
+             src_format: SrcFormat = SrcFormat.mmcif,
+             chain_id: str = None,
+             assembly_id: str = None
+     ):
          self.__check_residue_embedding()
-         structure = _get_structure_from_src(structure_src, format, chain_id, assembly_id)
+         structure = get_structure_from_src(src_structure, src_format, chain_id, assembly_id)
          embedding_ch = []
          for atom_ch in chain_iter(structure):
              atom_res = atom_ch[filter_amino_acids(atom_ch)]
@@ -66,7 +74,10 @@ class RcsbStructureEmbedding:
              dim=0
          )

-     def sequence_embedding(self, sequence):
+     def sequence_embedding(
+             self,
+             sequence: str
+     ):
          self.__check_residue_embedding()

          if sequence.startswith(">"):
@@ -85,7 +96,10 @@ class RcsbStructureEmbedding:

          return result.per_residue_embedding

-     def aggregator_embedding(self, residue_embedding):
+     def aggregator_embedding(
+             self,
+             residue_embedding: torch.Tensor
+     ):
          self.__check_aggregator_embedding()
          return self.__aggregator_embedding(residue_embedding)

@@ -98,74 +112,16 @@ class RcsbStructureEmbedding:
          self.load_aggregator_embedding()


- def _get_structure_from_src(structure_src, format="pdb", chain_id=None, assembly_id=None):
-     if format == "pdb":
-         pdb_file = PDBFile.read(structure_src)
-         structure = _get_pdb_structure(pdb_file, assembly_id)
-     elif format == "mmcif":
-         cif_file = CIFFile.read(structure_src)
-         structure = _get_structure(cif_file, assembly_id)
-     elif format == "binarycif":
-         cif_file = BinaryCIFFile.read(structure_src)
-         structure = _get_structure(cif_file, assembly_id)
-     else:
-         raise RuntimeError(f"Unknown file format {format}")
-
-     if chain_id is not None:
-         structure = structure[structure.chain_id == chain_id]
-
-     return structure
-
-
- def _get_pdb_structure(pdb_file, assembly_id=None):
-     return get_pdb_structure(
-         pdb_file,
-         model=1
-     ) if assembly_id is None else get_pdb_assembly(
-         pdb_file,
-         assembly_id=assembly_id,
-         model=1
-     )
-
-
- def _get_structure(cif_file, assembly_id=None):
-     return get_structure(
-         cif_file,
-         model=1,
-         use_author_fields=False
-     ) if assembly_id is None else get_assembly(
-         cif_file,
-         assembly_id=assembly_id,
-         model=1,
-         use_author_fields=False
-     )
-
-
- def _download_model(
-         repo_id,
-         filename,
-         revision
- ):
-     return hf_hub_download(
-         repo_id=repo_id,
-         filename=filename,
-         revision=revision
-     )
-
-
- def _load_model(model_path, device=None):
+ def _load_model(device=None):
      if not device:
          device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-     weights = torch.load(model_path, weights_only=True, map_location=device)
-     aggregator_model = ResidueEmbeddingAggregator()
-     aggregator_model.load_state_dict(weights)
+     aggregator_model = get_aggregator_model(device=device)
      aggregator_model.to(device)
      aggregator_model.eval()
      return aggregator_model


  def _load_res_model(device=None):
-     return ESM3.from_pretrained(
-         ESM3_OPEN_SMALL,
-         device
-     )
+     if not device:
+         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     return get_residue_model(device)

rcsb_embedding_model/types/api_types.py (new file)
@@ -0,0 +1,29 @@
+ from enum import Enum
+ from os import PathLike
+ from typing import NewType, Union, IO, Tuple, List, Literal, Optional
+
+ StreamSrc = NewType('StreamSrc', Union[PathLike, IO])
+ StreamTuple = NewType('StreamTuple', Tuple[StreamSrc, str, str])
+
+ Devices = NewType('Devices', Union[int, List[int], Literal["auto"]])
+
+ OptionalPath = NewType('OptionalPath', Optional[PathLike])
+
+
+ class SrcFormat(str, Enum):
+     pdb = "pdb"
+     mmcif = "mmcif"
+     bciff = "binarycif"
+
+
+ class Accelerator(str, Enum):
+     cpu = "cpu"
+     gpu = "gpu"
+     tpu = "tpu"
+     hpu = "hpu"
+     auto = "auto"
+
+
+ class SrcLocation(str, Enum):
+     local = "local"
+     remote = "remote"

rcsb_embedding_model/utils/data.py (new file)
@@ -0,0 +1,47 @@
+ from io import StringIO
+
+ import requests
+ import torch
+
+
+ def collate_seq_embeddings(batch_list):
+     """
+     Pads the tensors in a batch to the same size.
+
+     Args:
+         batch_list (list of torch.Tensor): A list of samples, where each sample is a tensor of shape (sequence_length, embedding_dim).
+
+     Returns:
+         tuple: A tuple containing:
+             - padded_batch (torch.Tensor): A tensor of shape (batch_size, max_seq_length, embedding_dim), where each sample is padded to the max sequence length.
+             - mask_batch (torch.Tensor): A tensor of shape (batch_size, max_seq_length) where padded positions are marked as True and data positions as False.
+     """
+     if batch_list[0] is None:
+         return None
+     device = batch_list[0].device  # Get the device of the input tensors
+     max_len = max(sample.size(0) for sample in batch_list)  # Determine the maximum sequence length
+     dim = batch_list[0].size(1)  # Determine the embedding dimension
+     batch_size = len(batch_list)  # Determine the batch size
+
+     # Initialize tensors for the padded batch and masks on the same device as the input tensors
+     padded_batch = torch.zeros((batch_size, max_len, dim), dtype=batch_list[0].dtype, device=device)
+     mask_batch = torch.ones((batch_size, max_len), dtype=torch.bool, device=device)
+
+     for i, sample in enumerate(batch_list):
+         seq_len = sample.size(0)  # Get the length of the current sequence
+         padded_batch[i, :seq_len] = sample  # Copy the sequence; the tail stays zero-padded
+         mask_batch[i, :seq_len] = False  # Set mask positions for the actual data to False
+
+     return padded_batch, mask_batch
+
+
+ def stringio_from_url(url):
+     try:
+         response = requests.get(url)
+         response.raise_for_status()
+         return StringIO(response.text)
+     except requests.exceptions.RequestException as e:
+         print(f"Error fetching URL: {e}")
+         return None
+
+
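
As a sanity check of the padding contract documented above, two sequences of lengths 3 and 5 with embedding dimension 4 collate like this:

```python
import torch
from rcsb_embedding_model.utils.data import collate_seq_embeddings

a, b = torch.zeros(3, 4), torch.zeros(5, 4)
padded, mask = collate_seq_embeddings([a, b])
print(padded.shape)  # torch.Size([2, 5, 4])
print(mask.shape)    # torch.Size([2, 5])
print(mask[0])       # tensor([False, False, False,  True,  True]) - padding is True
```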

rcsb_embedding_model/utils/model.py (new file)
@@ -0,0 +1,29 @@
+ import torch
+ from esm.models.esm3 import ESM3
+ from esm.utils.constants.models import ESM3_OPEN_SMALL
+ from huggingface_hub import hf_hub_download
+
+ from rcsb_embedding_model.model.residue_embedding_aggregator import ResidueEmbeddingAggregator
+
+ REPO_ID = "rcsb/rcsb-embedding-model"
+ FILE_NAME = "rcsb-embedding-model.pt"
+ REVISION = "410606e40b1bb7968ce318c41009355c3ac32503"
+
+
+ def get_aggregator_model(device=None):
+     model_path = hf_hub_download(
+         repo_id=REPO_ID,
+         filename=FILE_NAME,
+         revision=REVISION
+     )
+     weights = torch.load(model_path, weights_only=True, map_location=device)
+     aggregator_model = ResidueEmbeddingAggregator()
+     aggregator_model.load_state_dict(weights)
+     return aggregator_model
+
+
+ def get_residue_model(device=None):
+     return ESM3.from_pretrained(
+         ESM3_OPEN_SMALL,
+         device
+     )

rcsb_embedding_model/utils/structure_parser.py (new file)
@@ -0,0 +1,51 @@
+
+ from biotite.structure.io.pdb import PDBFile, get_structure as get_pdb_structure, get_assembly as get_pdb_assembly
+ from biotite.structure.io.pdbx import CIFFile, get_structure, get_assembly, BinaryCIFFile
+
+
+ def get_structure_from_src(
+         src_structure,
+         src_format="mmcif",
+         chain_id=None,
+         assembly_id=None
+ ):
+     if src_format == "pdb":
+         pdb_file = PDBFile.read(src_structure)
+         structure = __get_pdb_structure(pdb_file, assembly_id)
+     elif src_format == "mmcif":
+         cif_file = CIFFile.read(src_structure)
+         structure = __get_structure(cif_file, assembly_id)
+     elif src_format == "binarycif":
+         cif_file = BinaryCIFFile.read(src_structure)
+         structure = __get_structure(cif_file, assembly_id)
+     else:
+         raise RuntimeError(f"Unknown file format {src_format}")
+
+     if chain_id is not None:
+         structure = structure[structure.chain_id == chain_id]
+
+     return structure
+
+
+ def __get_pdb_structure(pdb_file, assembly_id=None):
+     return get_pdb_structure(
+         pdb_file,
+         model=1
+     ) if assembly_id is None else get_pdb_assembly(
+         pdb_file,
+         assembly_id=assembly_id,
+         model=1
+     )
+
+
+ def __get_structure(cif_file, assembly_id=None):
+     return get_structure(
+         cif_file,
+         model=1,
+         use_author_fields=False
+     ) if assembly_id is None else get_assembly(
+         cif_file,
+         assembly_id=assembly_id,
+         model=1,
+         use_author_fields=False
+     )
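
Calling the parser directly is straightforward; a short sketch with an illustrative file name (note that for CIF input the chain filter matches label_asym_id, since use_author_fields=False):

```python
from rcsb_embedding_model.utils.structure_parser import get_structure_from_src

structure = get_structure_from_src(
    "101m.cif",          # illustrative path
    src_format="mmcif",
    chain_id="A"
)
print(structure.array_length())  # number of atoms in the selected chain
```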

rcsb_embedding_model/writer/batch_writer.py (new file)
@@ -0,0 +1,113 @@
+
+ from abc import abstractmethod
+ from collections import deque
+ from abc import ABC
+
+ import torch
+ import pandas as pd
+
+ from lightning.pytorch.callbacks import BasePredictionWriter
+
+
+ class CoreBatchWriter(BasePredictionWriter, ABC):
+     def __init__(
+             self,
+             output_path,
+             postfix,
+             write_interval="batch"
+     ):
+         super().__init__(write_interval)
+         self.out_path = output_path
+         self.postfix = postfix
+
+     def write_on_batch_end(
+             self,
+             trainer,
+             pl_module,
+             prediction,
+             batch_indices,
+             batch,
+             batch_idx,
+             dataloader_idx
+     ):
+         if prediction is None:
+             return
+         embeddings, dom_ids = prediction
+         deque(map(
+             self._write_embedding,
+             embeddings,
+             dom_ids
+         ))
+
+     def file_name(self, dom_id):
+         return f'{self.out_path}/{dom_id}.{self.postfix}'
+
+     @abstractmethod
+     def _write_embedding(self, embedding, dom_id):
+         pass
+
+
+ class CsvBatchWriter(CoreBatchWriter, ABC):
+     def __init__(
+             self,
+             output_path,
+             postfix="csv",
+             write_interval="batch"
+     ):
+         super().__init__(output_path, postfix, write_interval)
+
+     def _write_embedding(self, embedding, dom_id):
+         pd.DataFrame(embedding.to('cpu').numpy()).to_csv(
+             self.file_name(dom_id),
+             index=False,
+             header=False
+         )
+
+
+ class TensorBatchWriter(CoreBatchWriter, ABC):
+     def __init__(
+             self,
+             output_path,
+             postfix="pt",
+             write_interval="batch",
+             device="cpu"
+     ):
+         super().__init__(output_path, postfix, write_interval)
+         self.device = device
+
+     def _write_embedding(self, embedding, dom_id):
+         torch.save(
+             embedding.to(self.device),
+             self.file_name(dom_id)
+         )
+
+
+ class DataFrameStorage(CoreBatchWriter, ABC):
+     def __init__(
+             self,
+             output_path,
+             df_id,
+             postfix="pkl",
+             write_interval="batch"
+     ):
+         super().__init__(output_path, postfix, write_interval)
+         self.df_id = df_id
+         self.embedding = pd.DataFrame(
+             data={},
+             columns=['id', 'embedding'],
+         )
+
+     def _write_embedding(self, embedding, dom_id):
+         self.embedding = pd.concat([
+             self.embedding,
+             pd.DataFrame(
+                 data={'id': dom_id, 'embedding': [embedding.to('cpu').numpy()]},
+                 columns=['id', 'embedding'],
+             )
+         ], ignore_index=True)
+
+     def on_predict_end(self, trainer, pl_module):
+         self.embedding.to_pickle(
+             f"{self.out_path}/{self.df_id}.pkl.gz",
+             compression='gzip'
+         )
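
Note that `DataFrameStorage` accumulates all rows in memory and writes a single file, `<out_path>/<df_id>.pkl.gz`, only when prediction ends. Reading it back is plain pandas (the path shown assumes out_path="results" and df_id="embeddings"):

```python
import pandas as pd

df = pd.read_pickle("results/embeddings.pkl.gz", compression="gzip")
print(df.columns.tolist())  # ['id', 'embedding']
```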

rcsb_embedding_model-0.0.7.dist-info/METADATA (new file)
@@ -0,0 +1,126 @@
+ Metadata-Version: 2.4
+ Name: rcsb-embedding-model
+ Version: 0.0.7
+ Summary: Protein Embedding Model for Structure Search
+ Project-URL: Homepage, https://github.com/rcsb/rcsb-embedding-model
+ Project-URL: Issues, https://github.com/rcsb/rcsb-embedding-model/issues
+ Author-email: Joan Segura <joan.segura@rcsb.org>
+ License-Expression: BSD-3-Clause
+ License-File: LICENSE.md
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: >=3.10
+ Requires-Dist: esm>=3.2.0
+ Requires-Dist: lightning>=2.5.0
+ Requires-Dist: torch>=2.2.0
+ Requires-Dist: typer>=0.15.0
+ Description-Content-Type: text/markdown
+
+ # RCSB Embedding Model
+
+ **Version** 0.0.7
+
+
+ ## Overview
+
+ RCSB Embedding Model is a neural network architecture designed to encode macromolecular 3D structures into fixed-length vector embeddings for efficient large-scale structure similarity search.
+
+ Preprint: [Multi-scale structural similarity embedding search across entire proteomes](https://www.biorxiv.org/content/10.1101/2025.02.28.640875v1).
+
+ A web-based implementation using this model for structure similarity search is available at [rcsb-embedding-search](http://embedding-search.rcsb.org).
+
+ If you are interested in training the model with a new dataset, visit the [rcsb-embedding-search repository](https://github.com/bioinsilico/rcsb-embedding-search), which provides scripts and documentation for training.
+
+
+ ## Features
+
+ - **Residue-level embeddings** computed using the ESM3 protein language model
+ - **Structure-level embeddings** aggregated via a transformer-based aggregator network
+ - **Command-line interface** implemented with Typer for high-throughput inference workflows
+ - **Python API** for interactive embedding computation and integration into analysis pipelines
+ - **High-performance inference** leveraging PyTorch Lightning, with multi-node and multi-GPU support
+
+ ---
+
+ ## Installation
+
+     pip install rcsb-embedding-model
+
+ **Requirements:**
+
+ - Python ≥ 3.10
+ - ESM ≥ 3.2.0
+ - PyTorch ≥ 2.2.0
+ - Lightning ≥ 2.5.0
+ - Typer ≥ 0.15.0
+
+ ---
+
+ ## Quick Start
+
+ ### CLI
+
+     # 1. Compute residue embeddings: Calculate residue level embeddings of protein structures using ESM3.
+     inference residue-embedding --src-file data/structures.csv --output-path results/residue_embeddings --src-format mmcif --batch-size 8 --devices auto
+
+     # 2. Compute structure embeddings: Calculate single-chain protein embeddings from structural files. Predictions are stored in a single pandas data-frame file.
+     inference structure-embedding --src-file data/structures.csv --output-path results/structure_embeddings --out-df-id embeddings --batch-size 4 --devices 0 --devices 1
+
+     # 3. Compute chain embeddings: Calculate single-chain protein embeddings from residue level embeddings stored as torch tensor files.
+     inference chain-embedding --src-file results/residue_embeddings.csv --output-path results/chain_embeddings --batch-size 4
+
+ ### Python API
+
+     from rcsb_embedding_model import RcsbStructureEmbedding
+
+     model = RcsbStructureEmbedding()
+
+     # Compute per-residue embeddings
+     res_emb = model.residue_embedding(
+         src_structure="examples/1abc.cif",
+         src_format="mmcif",
+         chain_id="A"
+     )
+
+     # Aggregate to structure-level embedding
+     struct_emb = model.aggregator_embedding(res_emb)
+
+ See the examples directory for complete scripts.
+
+ ---
+
+ ## Model Architecture
+
+ The embedding model is trained to predict structural similarity by approximating TM-scores using cosine distances between embeddings. It consists of two main components:
+
+ - **Protein Language Model (PLM)**: Computes residue-level embeddings from a given 3D structure.
+ - **Residue Embedding Aggregator**: A transformer-based neural network that aggregates these residue-level embeddings into a single vector.
+
+ ![Embedding model architecture](assets/embedding-model-architecture.png)
+
+ ### **Protein Language Model (PLM)**
+ Residue-wise embeddings of protein structures are computed using the [ESM3](https://www.evolutionaryscale.ai/) generative protein language model.
+
+ ### **Residue Embedding Aggregator**
+ The aggregation component consists of six transformer encoder layers, each with a 3,072-neuron feedforward layer and ReLU activations. After processing through these layers, a summation pooling operation is applied, followed by 12 fully connected residual layers that refine the embeddings into a single 1,536-dimensional vector.
+
+ ---
+
+ ## Development
+
+     git clone https://github.com/rcsb/rcsb-embedding-model.git
+     cd rcsb-embedding-model
+     pip install -e .
+     pytest
+
+ ---
+
+ ## Citation
+
+ Segura, J., Bittrich, S., et al. (2025). *Multi-scale structural similarity embedding search across entire proteomes*. bioRxiv. (Preprint: https://www.biorxiv.org/content/10.1101/2025.02.28.640875v1)
+
+ ---
+
+ ## License
+
+ This project is licensed under the BSD 3-Clause License. See [LICENSE.md](LICENSE.md) for details.
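
The "Residue Embedding Aggregator" paragraph in the README above maps onto a compact PyTorch layout. The sketch below is an illustrative reconstruction from that prose alone (layer counts and widths as stated; the attention head count and the exact residual-block wiring are assumptions, and this is not the shipped `ResidueEmbeddingAggregator`):

```python
import torch
from torch import nn


class AggregatorSketch(nn.Module):
    """Illustrative only: 6 transformer encoder layers (3072-wide FF, ReLU),
    summation pooling over residues, then 12 residual FC layers -> 1536-d vector."""

    def __init__(self, dim=1536, n_encoder_layers=6, n_res_blocks=12):
        super().__init__()
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=dim, nhead=8, dim_feedforward=3072,
            activation="relu", batch_first=True
        )
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=n_encoder_layers)
        self.res_blocks = nn.ModuleList(
            nn.Sequential(nn.Linear(dim, dim), nn.ReLU()) for _ in range(n_res_blocks)
        )

    def forward(self, x, padding_mask=None):
        # x: (batch, seq_len, dim); padding_mask: True at padded positions
        h = self.encoder(x, src_key_padding_mask=padding_mask)
        if padding_mask is not None:
            h = h.masked_fill(padding_mask.unsqueeze(-1), 0.0)
        v = h.sum(dim=1)          # summation pooling over residues
        for block in self.res_blocks:
            v = v + block(v)      # residual fully connected refinement
        return v                  # (batch, 1536)
```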

rcsb_embedding_model-0.0.7.dist-info/RECORD (new file)
@@ -0,0 +1,24 @@
+ rcsb_embedding_model/__init__.py,sha256=r3gLdeBIXkQEQA_K6QcRPO-TtYuAQSutk6pXRUE_nas,120
+ rcsb_embedding_model/rcsb_structure_embedding.py,sha256=qGUEdRPjYbsFWThsQa_ZVaSJ7nURnfRBLBqJlLbcY0I,4433
+ rcsb_embedding_model/cli/args_utils.py,sha256=7nP2q8pL5dWK_U7opxtWmoFcYVwasky6elHk-dASFaI,165
+ rcsb_embedding_model/cli/inference.py,sha256=sx8cGiq_japc0mKFarK1aVkGfK-FhTeZdn_Ng0ijezE,6590
+ rcsb_embedding_model/dataset/esm_prot_from_csv.py,sha256=1XMiYyJXfodXZGSrU07uyoYbdKR9-KvNfb1xNqab_W8,2722
+ rcsb_embedding_model/dataset/residue_embedding_from_csv.py,sha256=0-5L64tyER-RpT166pC71qxOpUdVZbcuBQONPcAIuno,862
+ rcsb_embedding_model/inference/chain_inference.py,sha256=SgXDa-TkDcvlkQxqEwDt81RdE7NmgiaJD8uaROgMbl8,1506
+ rcsb_embedding_model/inference/esm_inference.py,sha256=pX-_RhzAIvL0Zdg9wjScLBP6Y1sq4RLNio4-vdR5MLU,1498
+ rcsb_embedding_model/inference/structure_inference.py,sha256=qPzAGWyzFWqeKV9yoPSw4LrEB9XgKTJnRQysSBhfg14,1564
+ rcsb_embedding_model/model/layers.py,sha256=lhKaWC4gTS_T5lHOP0mgnnP8nKTPEOm4MrjhESA4hE8,743
+ rcsb_embedding_model/model/residue_embedding_aggregator.py,sha256=k3UW63Ax8DtjCMdD3O5xNxtyAu28l2n3-Ab6nS0atm0,1967
+ rcsb_embedding_model/modules/chain_module.py,sha256=sDSPXJmWuU2C3lt1NorlbUVWZvRSLzumPdFQk01h3VI,403
+ rcsb_embedding_model/modules/esm_module.py,sha256=CTHGOATXiarqZsBsZ8oxGJBj20A73186Slpr0EzMJsE,770
+ rcsb_embedding_model/modules/structure_module.py,sha256=dEtDNdWo1j2sSDa0JiOHQfEfQzIWqSLEKpvOX0GrXZ4,1048
+ rcsb_embedding_model/types/api_types.py,sha256=x7274MyjkRXn8B-W-PY5PK9g0CP1pT_clZbrAuFuHPA,626
+ rcsb_embedding_model/utils/data.py,sha256=LGw3wvq_LCcqSovHZacOqxEczn12SZk2i51WK9xkk0k,1877
+ rcsb_embedding_model/utils/model.py,sha256=rpZa-gfm3cEtbBd7UXMHrZv3x6f0AC8TJT3gtrSxr5I,852
+ rcsb_embedding_model/utils/structure_parser.py,sha256=0lcjCuQMCh0lb3OMj76rqf7kACzJgOwdk3EZ7-ZOQfI,1492
+ rcsb_embedding_model/writer/batch_writer.py,sha256=ekgzFZyoKpcnZ3IDP9hfOWBpuHxUQ31P35ViDAi-Edw,2843
+ rcsb_embedding_model-0.0.7.dist-info/METADATA,sha256=mfl1YYB48Um5FdZZkHOwzzMPRvsw_HlFHeqXsCGWs0Q,4959
+ rcsb_embedding_model-0.0.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ rcsb_embedding_model-0.0.7.dist-info/entry_points.txt,sha256=MK11jTIEmaV-x4CkPX5IymDaVs7Ky_f2xxU8BJVZ_9Q,69
+ rcsb_embedding_model-0.0.7.dist-info/licenses/LICENSE.md,sha256=oUaHiKgfBkChth_Sm67WemEvatO1U0Go8LHjaskXY0w,1522
+ rcsb_embedding_model-0.0.7.dist-info/RECORD,,

rcsb_embedding_model-0.0.7.dist-info/entry_points.txt (new file)
@@ -0,0 +1,2 @@
+ [console_scripts]
+ inference = rcsb_embedding_model.cli.inference:app

rcsb_embedding_model-0.0.5.dist-info/METADATA (removed)
@@ -1,115 +0,0 @@
- Metadata-Version: 2.4
- Name: rcsb-embedding-model
- Version: 0.0.5
- Summary: Protein Embedding Model for Structure Search
- Project-URL: Homepage, https://github.com/rcsb/rcsb-embedding-model
- Project-URL: Issues, https://github.com/rcsb/rcsb-embedding-model/issues
- Author-email: Joan Segura <joan.segura@rcsb.org>
- License-Expression: BSD-3-Clause
- License-File: LICENSE.md
- Classifier: Operating System :: OS Independent
- Classifier: Programming Language :: Python :: 3
- Requires-Python: >=3.10
- Requires-Dist: esm>=3.2.0
- Requires-Dist: torch>=2.2.0
- Description-Content-Type: text/markdown
-
- # RCSB Embedding Model: A Deep Learning Approach for 3D Structure Embeddings
-
- ## Overview
- RCSB Embedding Model is a PyTorch-based neural network that transforms macromolecular 3D structures into vector embeddings.
-
- Preprint: [Multi-scale structural similarity embedding search across entire proteomes](https://www.biorxiv.org/content/10.1101/2025.02.28.640875v1).
-
- A web-based implementation using this model for structure similarity search is available at [rcsb-embedding-search](http://embedding-search.rcsb.org).
-
- If you are interested in training the model with a new dataset, visit the [rcsb-embedding-search repository](https://github.com/bioinsilico/rcsb-embedding-search), which provides scripts and documentation for training.
-
- ---
-
- ## Embedding Model
- The embedding model is trained to predict structural similarity by approximating TM-scores using cosine distances between embeddings. It consists of two main components:
-
- - **Protein Language Model (PLM)**: Computes residue-level embeddings from a given 3D structure.
- - **Residue Embedding Aggregator**: A transformer-based neural network that aggregates these residue-level embeddings into a single vector.
-
- ![Embedding model architecture](assets/embedding-model-architecture.png)
-
- ### **Protein Language Model (PLM)**
- Residue-wise embeddings of protein structures are computed using the [ESM3](https://www.evolutionaryscale.ai/) generative protein language model.
-
- ### **Residue Embedding Aggregator**
- The aggregation component consists of six transformer encoder layers, each with a 3,072-neuron feedforward layer and ReLU activations. After processing through these layers, a summation pooling operation is applied, followed by 12 fully connected residual layers that refine the embeddings into a single 1,536-dimensional vector.
-
- ---
-
- ## How to Use the Model
- This repository provides the tools to compute embeddings for 3D macromolecular structure data.
-
- ### **Installation**
- `pip install rcsb-embedding-model`
-
- ### **Requirements**
- Ensure you have the following dependencies installed:
- - `python >= 3.10`
- - `esm`
- - `torch`
-
- ### **Generating Residue Embeddings**
- ESM3 embeddings for the 3D structures can be calculated as:
-
- ```python
- from rcsb_embedding_model import RcsbStructureEmbedding
-
- mmcif_file = "<path_to_file>/<name>.cif"
- model = RcsbStructureEmbedding()
- res_embedding = model.residue_embedding(
-     structure_src=mmcif_file,
-     format="mmcif",
-     chain_id='A'
- )
- ```
-
- ### **Generating Protein Structure Embeddings**
- Protein 3D structure embedding can be calculated as:
-
- ```python
- from rcsb_embedding_model import RcsbStructureEmbedding
-
- mmcif_file = "<path_to_file>/<name>.cif"
- model = RcsbStructureEmbedding()
- res_embedding = model.residue_embedding(
-     structure_src=mmcif_file,
-     format="mmcif",
-     chain_id='A'
- )
- structure_embedding = model.aggregator_embedding(
-     res_embedding
- )
- ```
-
- ### **Pretrained Model**
- You can download a pretrained Residue Embedding Aggregator model from [Hugging Face](https://huggingface.co/jseguramora/rcsb-embedding-model/resolve/main/rcsb-embedding-model.pt).
-
- ---
-
- ## Questions & Issues
- For any questions or comments, please open an issue on this repository.
-
- ---
-
- ## License
- This software is released under the BSD 3-Clause License. See the full license text below.
-
- ### BSD 3-Clause License
-
- Copyright (c) 2024, RCSB Protein Data Bank, UC San Diego
-
- Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution.
- 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-

rcsb_embedding_model-0.0.5.dist-info/RECORD (removed)
@@ -1,8 +0,0 @@
- rcsb_embedding_model/__init__.py,sha256=r3gLdeBIXkQEQA_K6QcRPO-TtYuAQSutk6pXRUE_nas,120
- rcsb_embedding_model/rcsb_structure_embedding.py,sha256=CIgM3euWK-QO19V_D8flAB9Suv5aLJnU1DDEYea-n6w,6016
- rcsb_embedding_model/model/layers.py,sha256=lhKaWC4gTS_T5lHOP0mgnnP8nKTPEOm4MrjhESA4hE8,743
- rcsb_embedding_model/model/residue_embedding_aggregator.py,sha256=k3UW63Ax8DtjCMdD3O5xNxtyAu28l2n3-Ab6nS0atm0,1967
- rcsb_embedding_model-0.0.5.dist-info/METADATA,sha256=_K496_MAxRYfDiLun4L4LlvCQEUgOSmX7aC7vIrEahw,5381
- rcsb_embedding_model-0.0.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- rcsb_embedding_model-0.0.5.dist-info/licenses/LICENSE.md,sha256=oUaHiKgfBkChth_Sm67WemEvatO1U0Go8LHjaskXY0w,1522
- rcsb_embedding_model-0.0.5.dist-info/RECORD,,