rcsb-embedding-model 0.0.5__tar.gz → 0.0.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of rcsb-embedding-model might be problematic. Click here for more details.

Files changed (32) hide show
  1. rcsb_embedding_model-0.0.7/PKG-INFO +126 -0
  2. rcsb_embedding_model-0.0.7/README.md +108 -0
  3. rcsb_embedding_model-0.0.7/examples/esm_embeddings.py +23 -0
  4. {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.7}/pyproject.toml +9 -3
  5. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/cli/args_utils.py +9 -0
  6. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/cli/inference.py +175 -0
  7. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/dataset/esm_prot_from_csv.py +90 -0
  8. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/dataset/residue_embedding_from_csv.py +32 -0
  9. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/inference/chain_inference.py +50 -0
  10. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/inference/esm_inference.py +50 -0
  11. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/inference/structure_inference.py +51 -0
  12. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/modules/chain_module.py +16 -0
  13. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/modules/esm_module.py +24 -0
  14. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/modules/structure_module.py +27 -0
  15. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/rcsb_structure_embedding.py +127 -0
  16. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/types/api_types.py +29 -0
  17. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/utils/data.py +47 -0
  18. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/utils/model.py +29 -0
  19. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/utils/structure_parser.py +51 -0
  20. rcsb_embedding_model-0.0.7/src/rcsb_embedding_model/writer/batch_writer.py +113 -0
  21. rcsb_embedding_model-0.0.5/tests/test_model.py → rcsb_embedding_model-0.0.7/tests/test_embedding_model.py +5 -4
  22. rcsb_embedding_model-0.0.5/PKG-INFO +0 -115
  23. rcsb_embedding_model-0.0.5/README.md +0 -99
  24. rcsb_embedding_model-0.0.5/examples/esm_embeddings.py +0 -77
  25. rcsb_embedding_model-0.0.5/src/rcsb_embedding_model/rcsb_structure_embedding.py +0 -171
  26. {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.7}/.gitignore +0 -0
  27. {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.7}/LICENSE.md +0 -0
  28. {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.7}/assets/embedding-model-architecture.png +0 -0
  29. {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.7}/src/rcsb_embedding_model/__init__.py +0 -0
  30. {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.7}/src/rcsb_embedding_model/model/layers.py +0 -0
  31. {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.7}/src/rcsb_embedding_model/model/residue_embedding_aggregator.py +0 -0
  32. {rcsb_embedding_model-0.0.5 → rcsb_embedding_model-0.0.7}/tests/resources/1acb.cif +0 -0
@@ -0,0 +1,126 @@
1
+ Metadata-Version: 2.4
2
+ Name: rcsb-embedding-model
3
+ Version: 0.0.7
4
+ Summary: Protein Embedding Model for Structure Search
5
+ Project-URL: Homepage, https://github.com/rcsb/rcsb-embedding-model
6
+ Project-URL: Issues, https://github.com/rcsb/rcsb-embedding-model/issues
7
+ Author-email: Joan Segura <joan.segura@rcsb.org>
8
+ License-Expression: BSD-3-Clause
9
+ License-File: LICENSE.md
10
+ Classifier: Operating System :: OS Independent
11
+ Classifier: Programming Language :: Python :: 3
12
+ Requires-Python: >=3.10
13
+ Requires-Dist: esm>=3.2.0
14
+ Requires-Dist: lightning>=2.5.0
15
+ Requires-Dist: torch>=2.2.0
16
+ Requires-Dist: typer>=0.15.0
17
+ Description-Content-Type: text/markdown
18
+
19
+ # RCSB Embedding Model
20
+
21
+ **Version** 0.0.7
22
+
23
+
24
+ ## Overview
25
+
26
+ RCSB Embedding Model is a neural network architecture designed to encode macromolecular 3D structures into fixed-length vector embeddings for efficient large-scale structure similarity search.
27
+
28
+ Preprint: [Multi-scale structural similarity embedding search across entire proteomes](https://www.biorxiv.org/content/10.1101/2025.02.28.640875v1).
29
+
30
+ A web-based implementation using this model for structure similarity search is available at [rcsb-embedding-search](http://embedding-search.rcsb.org).
31
+
32
+ If you are interested in training the model with a new dataset, visit the [rcsb-embedding-search repository](https://github.com/bioinsilico/rcsb-embedding-search), which provides scripts and documentation for training.
33
+
34
+
35
+ ## Features
36
+
37
+ - **Residue-level embeddings** computed using the ESM3 protein language model
38
+ - **Structure-level embeddings** aggregated via a transformer-based aggregator network
39
+ - **Command-line interface** implemented with Typer for high-throughput inference workflows
40
+ - **Python API** for interactive embedding computation and integration into analysis pipelines
41
+ - **High-performance inference** leveraging PyTorch Lightning, with multi-node and multi-GPU support
42
+
43
+ ---
44
+
45
+ ## Installation
46
+
47
+ pip install rcsb-embedding-model
48
+
49
+ **Requirements:**
50
+
51
+ - Python ≥ 3.10
52
+ - ESM ≥ 3.2.0
53
+ - PyTorch ≥ 2.2.0
54
+ - Lightning ≥ 2.5.0
55
+ - Typer ≥ 0.15.0
56
+
57
+ ---
58
+
59
+ ## Quick Start
60
+
61
+ ### CLI
62
+
63
+ # 1. Compute residue embeddings: Calculate residue level embeddings of protein structures using ESM3.
64
+ inference residue-embedding --src-file data/structures.csv --output-path results/residue_embeddings --src-format mmcif --batch-size 8 --devices auto
65
+
66
+ # 2. Compute structure embeddings: Calculate single-chain protein embeddings from structural files. Predictions are stored in a single pandas data-frame file.
67
+ inference structure-embedding --src-file results/residue_embeddings.csv --output-path results/structure_embeddings --out-df-id embeddings.pkl --batch-size 4 --devices 0 --devices 1
68
+
69
+ # 3. Compute chain embeddings: Calculate single-chain protein embeddings from residue level embeddings stored as torch tensor files.
70
+ inference chain-embedding --src-file results/residue_embeddings.csv --output-path results/chain_embeddings --batch-size 4
71
+
72
+ ### Python API
73
+
74
+ from rcsb_embedding_model import RcsbStructureEmbedding
75
+
76
+ model = RcsbStructureEmbedding()
77
+
78
+ # Compute per-residue embeddings
79
+ res_emb = model.residue_embedding(
80
+ src_structure="examples/1abc.cif",
81
+ src_format="mmcif",
82
+ chain_id="A"
83
+ )
84
+
85
+ # Aggregate to structure-level embedding
86
+ struct_emb = model.aggregator_embedding(res_emb)
87
+
88
+ See the examples directory for complete scripts.
89
+
90
+ ---
91
+
92
+ ## Model Architecture
93
+
94
+ The embedding model is trained to predict structural similarity by approximating TM-scores using cosine distances between embeddings. It consists of two main components:
95
+
96
+ - **Protein Language Model (PLM)**: Computes residue-level embeddings from a given 3D structure.
97
+ - **Residue Embedding Aggregator**: A transformer-based neural network that aggregates these residue-level embeddings into a single vector.
98
+
99
+ ![Embedding model architecture](assets/embedding-model-architecture.png)
100
+
101
+ ### **Protein Language Model (PLM)**
102
+ Residue-wise embeddings of protein structures are computed using the [ESM3](https://www.evolutionaryscale.ai/) generative protein language model.
103
+
104
+ ### **Residue Embedding Aggregator**
105
+ The aggregation component consists of six transformer encoder layers, each with a 3,072-neuron feedforward layer and ReLU activations. After processing through these layers, a summation pooling operation is applied, followed by 12 fully connected residual layers that refine the embeddings into a single 1,536-dimensional vector.
106
+
107
+ ---
108
+
109
+ ## Development
110
+
111
+ git clone https://github.com/rcsb/rcsb-embedding-model.git
112
+ cd rcsb-embedding-model
113
+ pip install -e .
114
+ pytest
115
+
116
+ ---
117
+
118
+ ## Citation
119
+
120
+ Segura, J., Bittrich, S., et al. (2025). *Multi-scale structural similarity embedding search across entire proteomes*. bioRxiv. (Preprint: https://www.biorxiv.org/content/10.1101/2025.02.28.640875v1)
121
+
122
+ ---
123
+
124
+ ## License
125
+
126
+ This project is licensed under the BSD 3-Clause License. See [LICENSE.md](LICENSE.md) for details.
@@ -0,0 +1,108 @@
1
+ # RCSB Embedding Model
2
+
3
+ **Version** 0.0.7
4
+
5
+
6
+ ## Overview
7
+
8
+ RCSB Embedding Model is a neural network architecture designed to encode macromolecular 3D structures into fixed-length vector embeddings for efficient large-scale structure similarity search.
9
+
10
+ Preprint: [Multi-scale structural similarity embedding search across entire proteomes](https://www.biorxiv.org/content/10.1101/2025.02.28.640875v1).
11
+
12
+ A web-based implementation using this model for structure similarity search is available at [rcsb-embedding-search](http://embedding-search.rcsb.org).
13
+
14
+ If you are interested in training the model with a new dataset, visit the [rcsb-embedding-search repository](https://github.com/bioinsilico/rcsb-embedding-search), which provides scripts and documentation for training.
15
+
16
+
17
+ ## Features
18
+
19
+ - **Residue-level embeddings** computed using the ESM3 protein language model
20
+ - **Structure-level embeddings** aggregated via a transformer-based aggregator network
21
+ - **Command-line interface** implemented with Typer for high-throughput inference workflows
22
+ - **Python API** for interactive embedding computation and integration into analysis pipelines
23
+ - **High-performance inference** leveraging PyTorch Lightning, with multi-node and multi-GPU support
24
+
25
+ ---
26
+
27
+ ## Installation
28
+
29
+ pip install rcsb-embedding-model
30
+
31
+ **Requirements:**
32
+
33
+ - Python ≥ 3.10
34
+ - ESM ≥ 3.2.0
35
+ - PyTorch ≥ 2.2.0
36
+ - Lightning ≥ 2.5.0
37
+ - Typer ≥ 0.15.0
38
+
39
+ ---
40
+
41
+ ## Quick Start
42
+
43
+ ### CLI
44
+
45
+ # 1. Compute residue embeddings: Calculate residue level embeddings of protein structures using ESM3.
46
+ inference residue-embedding --src-file data/structures.csv --output-path results/residue_embeddings --src-format mmcif --batch-size 8 --devices auto
47
+
48
+ # 2. Compute structure embeddings: Calculate single-chain protein embeddings from structural files. Predictions are stored in a single pandas data-frame file.
49
+ inference structure-embedding --src-file results/residue_embeddings.csv --output-path results/structure_embeddings --out-df-id embeddings.pkl --batch-size 4 --devices 0 --devices 1
50
+
51
+ # 3. Compute chain embeddings: Calculate single-chain protein embeddings from residue level embeddings stored as torch tensor files.
52
+ inference chain-embedding --src-file results/residue_embeddings.csv --output-path results/chain_embeddings --batch-size 4
53
+
54
+ ### Python API
55
+
56
+ from rcsb_embedding_model import RcsbStructureEmbedding
57
+
58
+ model = RcsbStructureEmbedding()
59
+
60
+ # Compute per-residue embeddings
61
+ res_emb = model.residue_embedding(
62
+ src_structure="examples/1abc.cif",
63
+ src_format="mmcif",
64
+ chain_id="A"
65
+ )
66
+
67
+ # Aggregate to structure-level embedding
68
+ struct_emb = model.aggregator_embedding(res_emb)
69
+
70
+ See the examples directory for complete scripts.
71
+
72
+ ---
73
+
74
+ ## Model Architecture
75
+
76
+ The embedding model is trained to predict structural similarity by approximating TM-scores using cosine distances between embeddings. It consists of two main components:
77
+
78
+ - **Protein Language Model (PLM)**: Computes residue-level embeddings from a given 3D structure.
79
+ - **Residue Embedding Aggregator**: A transformer-based neural network that aggregates these residue-level embeddings into a single vector.
80
+
81
+ ![Embedding model architecture](assets/embedding-model-architecture.png)
82
+
83
+ ### **Protein Language Model (PLM)**
84
+ Residue-wise embeddings of protein structures are computed using the [ESM3](https://www.evolutionaryscale.ai/) generative protein language model.
85
+
86
+ ### **Residue Embedding Aggregator**
87
+ The aggregation component consists of six transformer encoder layers, each with a 3,072-neuron feedforward layer and ReLU activations. After processing through these layers, a summation pooling operation is applied, followed by 12 fully connected residual layers that refine the embeddings into a single 1,536-dimensional vector.
88
+
89
+ ---
90
+
91
+ ## Development
92
+
93
+ git clone https://github.com/rcsb/rcsb-embedding-model.git
94
+ cd rcsb-embedding-model
95
+ pip install -e .
96
+ pytest
97
+
98
+ ---
99
+
100
+ ## Citation
101
+
102
+ Segura, J., Bittrich, S., et al. (2025). *Multi-scale structural similarity embedding search across entire proteomes*. bioRxiv. (Preprint: https://www.biorxiv.org/content/10.1101/2025.02.28.640875v1)
103
+
104
+ ---
105
+
106
+ ## License
107
+
108
+ This project is licensed under the BSD 3-Clause License. See [LICENSE.md](LICENSE.md) for details.
@@ -0,0 +1,23 @@
1
+ import argparse
2
+
3
+ from rcsb_embedding_model import RcsbStructureEmbedding
4
+
5
+ if __name__ == "__main__":
6
+
7
+ parser = argparse.ArgumentParser()
8
+ parser.add_argument('--file', type=str, required=True)
9
+ parser.add_argument('--file_format', type=str)
10
+ parser.add_argument('--chain', type=str)
11
+ args = parser.parse_args()
12
+
13
+ model = RcsbStructureEmbedding()
14
+ res_embedding = model.residue_embedding(
15
+ src_structure=args.file,
16
+ src_format=args.file_format,
17
+ chain_id=args.chain
18
+ )
19
+ structure_embedding = model.aggregator_embedding(
20
+ res_embedding
21
+ )
22
+
23
+ print(res_embedding.shape, structure_embedding.shape)
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "rcsb-embedding-model"
3
- version = "0.0.5"
3
+ version = "0.0.7"
4
4
  authors = [
5
5
  { name="Joan Segura", email="joan.segura@rcsb.org" },
6
6
  ]
@@ -15,8 +15,11 @@ license = "BSD-3-Clause"
15
15
  license-files = ["LICEN[CS]E*"]
16
16
  dependencies=[
17
17
  "esm >= 3.2.0",
18
- "torch >= 2.2.0"
18
+ "torch >= 2.2.0",
19
+ "lightning >= 2.5.0",
20
+ "typer >= 0.15.0"
19
21
  ]
22
+
20
23
  [project.urls]
21
24
  Homepage = "https://github.com/rcsb/rcsb-embedding-model"
22
25
  Issues = "https://github.com/rcsb/rcsb-embedding-model/issues"
@@ -26,4 +29,7 @@ requires = [
26
29
  "hatchling >= 1.14.1"
27
30
  ]
28
31
 
29
- build-backend = "hatchling.build"
32
+ build-backend = "hatchling.build"
33
+
34
+ [project.scripts]
35
+ inference = "rcsb_embedding_model.cli.inference:app"
@@ -0,0 +1,9 @@
1
+
2
+
3
+ def arg_devices(devices):
4
+ if len(devices) == 1:
5
+ return devices[0] if devices[0] == "auto" else int(devices[0])
6
+ return [int(x) for x in devices]
7
+
8
+
9
+
@@ -0,0 +1,175 @@
1
+ from typing import Annotated, List
2
+
3
+ import typer
4
+
5
+ from rcsb_embedding_model.cli.args_utils import arg_devices
6
+ from rcsb_embedding_model.types.api_types import SrcFormat, Accelerator, SrcLocation
7
+
8
+ app = typer.Typer(
9
+ add_completion=False
10
+ )
11
+
12
+
13
+ @app.command(
14
+ name="residue-embedding",
15
+ help="Calculate residue level embeddings of protein structures using ESM3."
16
+ )
17
+ def residue_embedding(
18
+ src_file: Annotated[typer.FileText, typer.Option(
19
+ exists=True,
20
+ file_okay=True,
21
+ dir_okay=False,
22
+ resolve_path=True,
23
+ help='CSV file 3 columns: Structure File Path | Chain Id (asym_i for cif files) | Output file name.'
24
+ )],
25
+ output_path: Annotated[typer.FileText, typer.Option(
26
+ exists=True,
27
+ file_okay=False,
28
+ dir_okay=True,
29
+ resolve_path=True,
30
+ help='Output path to store predictions.'
31
+ )],
32
+ src_location: Annotated[SrcLocation, typer.Option(
33
+ help='Source input location.'
34
+ )] = SrcLocation.local,
35
+ src_format: Annotated[SrcFormat, typer.Option(
36
+ help='Structure file format.'
37
+ )] = SrcFormat.mmcif,
38
+ batch_size: Annotated[int, typer.Option(
39
+ help='Number of samples processed together in one iteration.'
40
+ )] = 1,
41
+ num_workers: Annotated[int, typer.Option(
42
+ help='Number of subprocesses to use for data loading.'
43
+ )] = 0,
44
+ num_nodes: Annotated[int, typer.Option(
45
+ help='Number of nodes to use for inference.'
46
+ )] = 1,
47
+ accelerator: Annotated[Accelerator, typer.Option(
48
+ help='Device used for inference.'
49
+ )] = Accelerator.auto,
50
+ devices: Annotated[List[str], typer.Option(
51
+ help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
52
+ )] = tuple(['auto'])
53
+ ):
54
+ from rcsb_embedding_model.inference.esm_inference import predict
55
+ predict(
56
+ csv_file=src_file,
57
+ src_location=src_location,
58
+ src_format=src_format,
59
+ batch_size=batch_size,
60
+ num_workers=num_workers,
61
+ num_nodes=num_nodes,
62
+ accelerator=accelerator,
63
+ devices=arg_devices(devices),
64
+ out_path=output_path
65
+ )
66
+
67
+
68
+ @app.command(
69
+ name="structure-embedding",
70
+ help="Calculate single-chain protein embeddings from structural files. Predictions are stored in a single pandas data-frame file."
71
+ )
72
+ def structure_embedding(
73
+ src_file: Annotated[typer.FileText, typer.Option(
74
+ exists=True,
75
+ file_okay=True,
76
+ dir_okay=False,
77
+ resolve_path=True,
78
+ help='CSV file 3 columns: Structure File Path | Chain Id (asym_i for cif files) | Output file name.'
79
+ )],
80
+ output_path: Annotated[typer.FileText, typer.Option(
81
+ exists=True,
82
+ file_okay=False,
83
+ dir_okay=True,
84
+ resolve_path=True,
85
+ help='Output path to store predictions.'
86
+ )],
87
+ out_df_id: Annotated[str, typer.Option(
88
+ help='File name to store predicted embeddings.'
89
+ )],
90
+ src_location: Annotated[SrcLocation, typer.Option(
91
+ help='Source input location.'
92
+ )] = SrcLocation.local,
93
+ src_format: Annotated[SrcFormat, typer.Option(
94
+ help='Structure file format.'
95
+ )] = SrcFormat.mmcif,
96
+ batch_size: Annotated[int, typer.Option(
97
+ help='Number of samples processed together in one iteration.'
98
+ )] = 1,
99
+ num_workers: Annotated[int, typer.Option(
100
+ help='Number of subprocesses to use for data loading.'
101
+ )] = 0,
102
+ num_nodes: Annotated[int, typer.Option(
103
+ help='Number of nodes to use for inference.'
104
+ )] = 1,
105
+ accelerator: Annotated[Accelerator, typer.Option(
106
+ help='Device used for inference.'
107
+ )] = Accelerator.auto,
108
+ devices: Annotated[List[str], typer.Option(
109
+ help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
110
+ )] = tuple(['auto'])
111
+ ):
112
+ from rcsb_embedding_model.inference.structure_inference import predict
113
+ predict(
114
+ csv_file=src_file,
115
+ src_location=src_location,
116
+ src_format=src_format,
117
+ batch_size=batch_size,
118
+ num_workers=num_workers,
119
+ num_nodes=num_nodes,
120
+ accelerator=accelerator,
121
+ devices=arg_devices(devices),
122
+ out_path=output_path,
123
+ out_df_id=out_df_id
124
+ )
125
+
126
+
127
+ @app.command(
128
+ name="chain-embedding",
129
+ help="Calculate single-chain protein embeddings from residue level embeddings stored as torch tensor files."
130
+ )
131
+ def chain_embedding(
132
+ src_file: Annotated[typer.FileText, typer.Option(
133
+ exists=True,
134
+ file_okay=True,
135
+ dir_okay=False,
136
+ resolve_path=True,
137
+ help='CSV file 2 columns: Residue Embedding Tensor File | Output file name.'
138
+ )],
139
+ output_path: Annotated[typer.FileText, typer.Option(
140
+ exists=True,
141
+ file_okay=False,
142
+ dir_okay=True,
143
+ resolve_path=True,
144
+ help='Output path to store predictions.'
145
+ )],
146
+ batch_size: Annotated[int, typer.Option(
147
+ help='Number of samples processed together in one iteration.'
148
+ )] = 1,
149
+ num_workers: Annotated[int, typer.Option(
150
+ help='Number of subprocesses to use for data loading.'
151
+ )] = 0,
152
+ num_nodes: Annotated[int, typer.Option(
153
+ help='Number of nodes to use for inference.'
154
+ )] = 1,
155
+ accelerator: Annotated[Accelerator, typer.Option(
156
+ help='Device used for inference.'
157
+ )] = Accelerator.auto,
158
+ devices: Annotated[List[str], typer.Option(
159
+ help='The devices to use. Can be set to a positive number or "auto". Repeat this argument to indicate multiple indices of devices. "auto" for automatic selection based on the chosen accelerator.'
160
+ )] = tuple(['auto'])
161
+ ):
162
+ from rcsb_embedding_model.inference.chain_inference import predict
163
+ predict(
164
+ csv_file=src_file,
165
+ batch_size=batch_size,
166
+ num_workers=num_workers,
167
+ num_nodes=num_nodes,
168
+ accelerator=accelerator,
169
+ devices=arg_devices(devices),
170
+ out_path=output_path
171
+ )
172
+
173
+
174
+ if __name__ == "__main__":
175
+ app()
@@ -0,0 +1,90 @@
1
+ import argparse
2
+
3
+ import torch
4
+ from biotite.structure import chain_iter
5
+ from esm.models.esm3 import ESM3
6
+ from esm.sdk.api import ESMProtein, SamplingConfig
7
+ from esm.utils.constants.models import ESM3_OPEN_SMALL
8
+ from esm.utils.structure.protein_chain import ProteinChain
9
+ from torch.utils.data import Dataset, DataLoader
10
+ import pandas as pd
11
+
12
+ from rcsb_embedding_model.types.api_types import SrcFormat, SrcLocation
13
+ from rcsb_embedding_model.utils.data import stringio_from_url
14
+ from rcsb_embedding_model.utils.structure_parser import get_structure_from_src
15
+
16
+
17
+ class EsmProtFromCsv(Dataset):
18
+
19
+ MIN_RES = 10
20
+ STREAM_ATTR = 'stream'
21
+ CH_ATTR = 'chain_id'
22
+ NAME_ATTR = 'name'
23
+
24
+ COLUMNS = [STREAM_ATTR, CH_ATTR, NAME_ATTR]
25
+
26
+ def __init__(
27
+ self,
28
+ csv_file,
29
+ src_location=SrcLocation.local,
30
+ src_format=SrcFormat.mmcif,
31
+ ):
32
+ super().__init__()
33
+ self.src_location = src_location
34
+ self.src_format = src_format
35
+ self.data = pd.DataFrame()
36
+ self.__load_stream(csv_file)
37
+
38
+ def __load_stream(self, stream_list):
39
+ self.data = pd.read_csv(
40
+ stream_list,
41
+ header=None,
42
+ index_col=None,
43
+ names=EsmProtFromCsv.COLUMNS
44
+ )
45
+
46
+ def __len__(self):
47
+ return len(self.data)
48
+
49
+ def __getitem__(self, idx):
50
+ src_structure = self.data.loc[idx, EsmProtFromCsv.STREAM_ATTR]
51
+ chain_id = self.data.loc[idx, EsmProtFromCsv.CH_ATTR]
52
+ name = self.data.loc[idx, EsmProtFromCsv.NAME_ATTR]
53
+ structure = get_structure_from_src(
54
+ src_structure=src_structure if self.src_location == SrcLocation.local else stringio_from_url(src_structure),
55
+ src_format=self.src_format,
56
+ chain_id=chain_id
57
+ )
58
+ for atom_ch in chain_iter(structure):
59
+ protein_chain = ProteinChain.from_atomarray(atom_ch)
60
+ return ESMProtein.from_protein_chain(protein_chain), name
61
+
62
+
63
+ if __name__ == '__main__':
64
+
65
+ parser = argparse.ArgumentParser()
66
+ parser.add_argument('--file_list', type=argparse.FileType('r'), required=True)
67
+ args = parser.parse_args()
68
+
69
+ dataset = EsmProtFromCsv(
70
+ args.file_list
71
+ )
72
+
73
+ esm3 = ESM3.from_pretrained(
74
+ ESM3_OPEN_SMALL,
75
+ torch.device("cpu")
76
+ )
77
+
78
+ dataloader = DataLoader(
79
+ dataset,
80
+ batch_size=2,
81
+ collate_fn=lambda _: _
82
+ )
83
+
84
+ for _batch in dataloader:
85
+ for esm_prot, name in _batch:
86
+ protein_tensor = esm3.encode(esm_prot)
87
+ embeddings = esm3.forward_and_sample(
88
+ protein_tensor, SamplingConfig(return_per_residue_embeddings=True)
89
+ ).per_residue_embedding
90
+ print(name, embeddings.shape)
@@ -0,0 +1,32 @@
1
+ import pandas as pd
2
+ import torch
3
+ from torch.utils.data import Dataset
4
+
5
+
6
+ class ResidueEmbeddingFromCSV(Dataset):
7
+
8
+ STREAM_ATTR = 'stream'
9
+ NAME_ATTR = 'name'
10
+
11
+ COLUMNS = [STREAM_ATTR, NAME_ATTR]
12
+
13
+ def __init__(self, csv_file):
14
+ super().__init__()
15
+ self.data = pd.DataFrame()
16
+ self.__load_stream(csv_file)
17
+
18
+ def __load_stream(self, csv_file):
19
+ self.data = pd.read_csv(
20
+ csv_file,
21
+ header=None,
22
+ index_col=None,
23
+ names=ResidueEmbeddingFromCSV.COLUMNS
24
+ )
25
+
26
+ def __len__(self):
27
+ return len(self.data)
28
+
29
+ def __getitem__(self, idx):
30
+ embedding_src = self.data.loc[idx, ResidueEmbeddingFromCSV.STREAM_ATTR]
31
+ name = self.data.loc[idx, ResidueEmbeddingFromCSV.NAME_ATTR]
32
+ return torch.load(embedding_src, map_location=torch.device('cpu')), name
@@ -0,0 +1,50 @@
1
+ from torch.utils.data import DataLoader
2
+ from lightning import Trainer
3
+ from typer import FileText
4
+
5
+ from rcsb_embedding_model.dataset.residue_embedding_from_csv import ResidueEmbeddingFromCSV
6
+ from rcsb_embedding_model.modules.chain_module import ChainModule
7
+ from rcsb_embedding_model.types.api_types import Accelerator, Devices, OptionalPath
8
+ from rcsb_embedding_model.utils.data import collate_seq_embeddings
9
+ from rcsb_embedding_model.writer.batch_writer import CsvBatchWriter
10
+
11
+
12
+ def predict(
13
+ csv_file: FileText,
14
+ batch_size: int = 1,
15
+ num_workers: int = 0,
16
+ num_nodes: int = 1,
17
+ accelerator: Accelerator = Accelerator.auto,
18
+ devices: Devices = 'auto',
19
+ out_path: OptionalPath = None
20
+ ):
21
+ inference_set = ResidueEmbeddingFromCSV(
22
+ csv_file=csv_file
23
+ )
24
+
25
+ inference_dataloader = DataLoader(
26
+ dataset=inference_set,
27
+ batch_size=batch_size,
28
+ num_workers=num_workers,
29
+ collate_fn=lambda emb: (
30
+ collate_seq_embeddings([x for x, z in emb]),
31
+ tuple([z for x, z in emb])
32
+ )
33
+ )
34
+
35
+ module = ChainModule()
36
+
37
+ inference_writer = CsvBatchWriter(out_path) if out_path is not None else None
38
+ trainer = Trainer(
39
+ callbacks=[inference_writer] if inference_writer is not None else None,
40
+ num_nodes=num_nodes,
41
+ accelerator=accelerator,
42
+ devices=devices
43
+ )
44
+
45
+ prediction = trainer.predict(
46
+ module,
47
+ inference_dataloader
48
+ )
49
+
50
+ return prediction