molcraft 0.1.0a16__tar.gz → 0.1.0a18__tar.gz

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.

Potentially problematic release: this version of molcraft might be problematic.

Files changed (35)
  1. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/PKG-INFO +13 -12
  2. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/README.md +12 -11
  3. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/__init__.py +4 -3
  4. molcraft-0.1.0a18/molcraft/applications/chromatography.py +0 -0
  5. molcraft-0.1.0a18/molcraft/applications/proteomics.py +274 -0
  6. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/chem.py +17 -22
  7. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/datasets.py +6 -6
  8. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/descriptors.py +14 -0
  9. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/features.py +50 -58
  10. molcraft-0.1.0a18/molcraft/featurizers.py +523 -0
  11. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/layers.py +95 -40
  12. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/models.py +2 -0
  13. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/records.py +24 -15
  14. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft.egg-info/PKG-INFO +13 -12
  15. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft.egg-info/SOURCES.txt +1 -1
  16. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/tests/test_featurizers.py +10 -17
  17. molcraft-0.1.0a16/molcraft/applications/proteomics.py +0 -239
  18. molcraft-0.1.0a16/molcraft/conformers.py +0 -151
  19. molcraft-0.1.0a16/molcraft/featurizers.py +0 -753
  20. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/LICENSE +0 -0
  21. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/applications/__init__.py +0 -0
  22. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/callbacks.py +0 -0
  23. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/losses.py +0 -0
  24. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/ops.py +0 -0
  25. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/tensors.py +0 -0
  26. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft.egg-info/dependency_links.txt +0 -0
  27. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft.egg-info/requires.txt +0 -0
  28. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft.egg-info/top_level.txt +0 -0
  29. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/pyproject.toml +0 -0
  30. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/setup.cfg +0 -0
  31. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/tests/test_chem.py +0 -0
  32. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/tests/test_layers.py +0 -0
  33. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/tests/test_losses.py +0 -0
  34. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/tests/test_models.py +0 -0
  35. {molcraft-0.1.0a16 → molcraft-0.1.0a18}/tests/test_tensors.py +0 -0
{molcraft-0.1.0a16 → molcraft-0.1.0a18}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: molcraft
- Version: 0.1.0a16
+ Version: 0.1.0a18
  Summary: Graph Neural Networks for Molecular Machine Learning
  Author-email: Alexander Kensert <alexander.kensert@gmail.com>
  License: MIT License
@@ -43,9 +43,9 @@ Provides-Extra: gpu
  Requires-Dist: tensorflow[and-cuda]>=2.16; extra == "gpu"
  Dynamic: license-file

- <img src="https://github.com/akensert/molcraft/blob/main/docs/_static/molcraft-logo.png" alt="molcraft-logo">
+ <img src="https://github.com/akensert/molcraft/blob/main/docs/_static/molcraft-logo.png" alt="molcraft-logo", width="90%">

- **Deep Learning on Molecules**: A Minimalistic GNN package for Molecular ML.
+ **Deep Learning on Molecules**: A Minimalistic GNN package for Molecular ML.

  > [!NOTE]
  > In progress.
@@ -83,11 +83,12 @@ featurizer = featurizers.MolGraphFeaturizer(
          features.BondType(),
          features.IsRotatable(),
      ],
-     super_atom=True,
+     super_node=True,
      self_loops=True,
+     include_hydrogens=False,
  )

- graph = featurizer([('N[C@@H](C)C(=O)O', 2.0), ('N[C@@H](CS)C(=O)O', 1.0)])
+ graph = featurizer([('N[C@@H](C)C(=O)O', 2.5), ('N[C@@H](CS)C(=O)O', 1.5)])
  print(graph)

  model = models.GraphModel.from_layers(
@@ -95,13 +96,13 @@ model = models.GraphModel.from_layers(
          layers.Input(graph.spec),
          layers.NodeEmbedding(dim=128),
          layers.EdgeEmbedding(dim=128),
-         layers.GraphTransformer(units=128),
-         layers.GraphTransformer(units=128),
-         layers.GraphTransformer(units=128),
-         layers.GraphTransformer(units=128),
-         layers.Readout(mode='mean'),
-         keras.layers.Dense(units=1024, activation='relu'),
-         keras.layers.Dense(units=1024, activation='relu'),
+         layers.GraphConv(units=128),
+         layers.GraphConv(units=128),
+         layers.GraphConv(units=128),
+         layers.GraphConv(units=128),
+         layers.Readout(),
+         keras.layers.Dense(units=1024, activation='elu'),
+         keras.layers.Dense(units=1024, activation='elu'),
          keras.layers.Dense(1)
      ]
  )

{molcraft-0.1.0a16 → molcraft-0.1.0a18}/README.md

@@ -1,6 +1,6 @@
- <img src="https://github.com/akensert/molcraft/blob/main/docs/_static/molcraft-logo.png" alt="molcraft-logo">
+ <img src="https://github.com/akensert/molcraft/blob/main/docs/_static/molcraft-logo.png" alt="molcraft-logo", width="90%">

- **Deep Learning on Molecules**: A Minimalistic GNN package for Molecular ML.
+ **Deep Learning on Molecules**: A Minimalistic GNN package for Molecular ML.

  > [!NOTE]
  > In progress.
@@ -38,11 +38,12 @@ featurizer = featurizers.MolGraphFeaturizer(
          features.BondType(),
          features.IsRotatable(),
      ],
-     super_atom=True,
+     super_node=True,
      self_loops=True,
+     include_hydrogens=False,
  )

- graph = featurizer([('N[C@@H](C)C(=O)O', 2.0), ('N[C@@H](CS)C(=O)O', 1.0)])
+ graph = featurizer([('N[C@@H](C)C(=O)O', 2.5), ('N[C@@H](CS)C(=O)O', 1.5)])
  print(graph)

  model = models.GraphModel.from_layers(
@@ -50,13 +51,13 @@ model = models.GraphModel.from_layers(
          layers.Input(graph.spec),
          layers.NodeEmbedding(dim=128),
          layers.EdgeEmbedding(dim=128),
-         layers.GraphTransformer(units=128),
-         layers.GraphTransformer(units=128),
-         layers.GraphTransformer(units=128),
-         layers.GraphTransformer(units=128),
-         layers.Readout(mode='mean'),
-         keras.layers.Dense(units=1024, activation='relu'),
-         keras.layers.Dense(units=1024, activation='relu'),
+         layers.GraphConv(units=128),
+         layers.GraphConv(units=128),
+         layers.GraphConv(units=128),
+         layers.GraphConv(units=128),
+         layers.Readout(),
+         keras.layers.Dense(units=1024, activation='elu'),
+         keras.layers.Dense(units=1024, activation='elu'),
          keras.layers.Dense(1)
      ]
  )
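
For orientation, here is the updated quick-start stitched into a single sketch. The featurizer construction is taken as given from the hunk above; the comment about the compile/fit workflow is an assumption for illustration and is not part of this diff.

    import keras
    from molcraft import layers, models

    # `featurizer` is assumed to be the MolGraphFeaturizer built as in the hunk above
    # (super_node=True, self_loops=True, include_hydrogens=False).
    graph = featurizer([('N[C@@H](C)C(=O)O', 2.5), ('N[C@@H](CS)C(=O)O', 1.5)])

    model = models.GraphModel.from_layers(
        [
            layers.Input(graph.spec),
            layers.NodeEmbedding(dim=128),
            layers.EdgeEmbedding(dim=128),
            layers.GraphConv(units=128),
            layers.Readout(),
            keras.layers.Dense(units=1024, activation='elu'),
            keras.layers.Dense(1)
        ]
    )
    # The usual Keras compile()/fit() workflow would follow here (not shown in this diff).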

{molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/__init__.py

@@ -1,4 +1,4 @@
- __version__ = '0.1.0a16'
+ __version__ = '0.1.0a18'

  import os
  os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
@@ -6,7 +6,6 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
  from molcraft import chem
  from molcraft import features
  from molcraft import descriptors
- from molcraft import conformers
  from molcraft import featurizers
  from molcraft import layers
  from molcraft import models
@@ -15,4 +14,6 @@ from molcraft import records
  from molcraft import tensors
  from molcraft import callbacks
  from molcraft import datasets
- from molcraft import losses
+ from molcraft import losses
+
+ from molcraft.applications import proteomics

molcraft-0.1.0a18/molcraft/applications/proteomics.py (new file)

@@ -0,0 +1,274 @@
+ import re
+ import keras
+ import numpy as np
+ import tensorflow as tf
+ import tensorflow_text as tf_text
+ from rdkit import Chem
+
+ from molcraft import featurizers
+ from molcraft import tensors
+ from molcraft import layers
+ from molcraft import models
+ from molcraft import chem
+
+ """
+
+
+
+
+
+
+ No need to correct smiles for modeling, only for interpretation.
+
+ Use added smiles data to rearrange list of saliency values.
+
+
+
+
+
+
+
+
+
+
+
+
+ """
+
+ # TODO: Add regex pattern for residue (C-term mod + N-term mod)?
+ # TODO: Add regex pattern for residue (C-term mod + N-term mod + mod)?
+
+ no_mod_pattern = r'([A-Z])'
+ side_chain_mod_pattern = r'([A-Z]\[[A-Za-z0-9]+\])'
+ n_term_mod_pattern = r'(\[[A-Za-z0-9]+\]-[A-Z])'
+ c_term_mod_pattern = r'([A-Z]-\[[A-Za-z0-9]+\])'
+ side_chain_and_n_term_mod_pattern = r'(\[[A-Za-z0-9]+\]-[A-Z]\[[A-Za-z0-9]+\])'
+ side_chain_and_c_term_mod_pattern = r'([A-Z]\[[A-Za-z0-9]+\]-\[[A-Za-z0-9]+\])'
+
+ residue_pattern: str = "|".join([
+     side_chain_and_n_term_mod_pattern,
+     side_chain_and_c_term_mod_pattern,
+     n_term_mod_pattern,
+     c_term_mod_pattern,
+     side_chain_mod_pattern,
+     no_mod_pattern
+ ])
+
+ default_residues: dict[str, str] = {
+     "A": "N[C@@H](C)C(=O)O",
+     "C": "N[C@@H](CS)C(=O)O",
+     "D": "N[C@@H](CC(=O)O)C(=O)O",
+     "E": "N[C@@H](CCC(=O)O)C(=O)O",
+     "F": "N[C@@H](Cc1ccccc1)C(=O)O",
+     "G": "NCC(=O)O",
+     "H": "N[C@@H](CC1=CN=C-N1)C(=O)O",
+     "I": "N[C@@H](C(CC)C)C(=O)O",
+     "K": "N[C@@H](CCCCN)C(=O)O",
+     "L": "N[C@@H](CC(C)C)C(=O)O",
+     "M": "N[C@@H](CCSC)C(=O)O",
+     "N": "N[C@@H](CC(=O)N)C(=O)O",
+     "P": "N1[C@@H](CCC1)C(=O)O",
+     "Q": "N[C@@H](CCC(=O)N)C(=O)O",
+     "R": "N[C@@H](CCCNC(=N)N)C(=O)O",
+     "S": "N[C@@H](CO)C(=O)O",
+     "T": "N[C@@H](C(O)C)C(=O)O",
+     "V": "N[C@@H](C(C)C)C(=O)O",
+     "W": "N[C@@H](CC(=CN2)C1=C2C=CC=C1)C(=O)O",
+     "Y": "N[C@@H](Cc1ccc(O)cc1)C(=O)O",
+ }
+
+ def has_c_terminal_mod(residue: str):
+     if re.search(c_term_mod_pattern, residue):
+         return True
+     return False
+
+ def has_n_terminal_mod(residue: str):
+     if re.search(n_term_mod_pattern, residue):
+         return True
+     return False
+
+ # def register_residues(residues: dict[str, str]) -> None:
+ #     for residue, smiles in residues.items():
+ #         if residue.startswith('P'):
+ #             smiles.startswith('N'), f'Incorrect SMILES permutation for {residue}.'
+ #         elif not residue.startswith('['):
+ #             smiles.startswith('N[C@@H]'), f'Incorrect SMILES permutation for {residue}.'
+ #         if len(residue) > 1 and not residue[1] == "-":
+ #             assert smiles.endswith('C(=O)O'), f'Incorrect SMILES permutation for {residue}.'
+ #         registered_residues[residue] = smiles
+ #         registered_residues[residue + '*'] = smiles.strip('O')
+
+
+ class Peptide(chem.Mol):
+
+     @classmethod
+     def from_sequence(cls, sequence: str, **kwargs) -> 'Peptide':
+         sequence = [
+             match.group(0) for match in re.finditer(residue_pattern, sequence)
+         ]
+         peptide_smiles = []
+         for i, residue in enumerate(sequence):
+             if i < len(sequence) - 1:
+                 residue_smiles = registered_residues[residue + '*']
+             else:
+                 residue_smiles = registered_residues[residue]
+             peptide_smiles.append(residue_smiles)
+         peptide_smiles = ''.join(peptide_smiles)
+         return super().from_encoding(peptide_smiles, **kwargs)
+
+
+ def permute_residue_smiles(smiles: str) -> str:
+     glycine = chem.Mol.from_encoding("NCC(=O)O")
+     mol = chem.Mol.from_encoding(smiles)
+     nitrogen_index = mol.GetSubstructMatch(glycine)[0]
+     permuted_smiles = Chem.MolToSmiles(
+         mol, rootedAtAtom=nitrogen_index
+     )
+     return permuted_smiles
+
+ def check_peptide_residue_smiles(smiles: list[str]) -> bool:
+     backbone = 'NCC(=O)' * (len(smiles) - 1) + 'NC'
+     backbone = chem.Mol.from_encoding(backbone)
+     mol = chem.Mol.from_encoding(''.join(smiles))
+     is_valid = mol.HasSubstructMatch(backbone)
+     return is_valid
+
+ @keras.saving.register_keras_serializable(package='proteomics')
+ class ResidueEmbedding(keras.layers.Layer):
+
+     def __init__(
+         self,
+         featurizer: featurizers.MolGraphFeaturizer,
+         embedder: models.GraphModel,
+         residues: dict[str, str] | None = None,
+         **kwargs
+     ) -> None:
+         super().__init__(**kwargs)
+         if residues is None:
+             residues = {}
+         self.embedder = embedder
+         self.featurizer = featurizer
+         self.embedding_dim = int(self.embedder.output.shape[-1])
+         self.ragged_split = SequenceSplitter(pad=False)
+         self.split = SequenceSplitter(pad=True)
+         self.use_cached_embeddings = tf.Variable(False)
+         self.residues = residues
+         self.supports_masking = True
+
+     @property
+     def residues(self) -> dict[str, str]:
+         return self._residues
+
+     @residues.setter
+     def residues(self, residues: dict[str, str]) -> None:
+
+         residues = {**default_residues, **residues}
+         self._residues = {}
+         for residue, smiles in residues.items():
+             permuted_smiles = permute_residue_smiles(smiles)
+             # Returned smiles should begin with the amino group.
+             # It seems that the returned smiles ends with carboxyl group,
+             # though we do another check just in case.
+             if not has_c_terminal_mod(residue):
+                 carboxyl_group = 'C(=O)O'
+                 if not permuted_smiles.endswith(carboxyl_group):
+                     raise ValueError(
+                         f'Unsupported permutation of {residue!r} smiles: {permuted_smiles!r}.'
+                     )
+             self._residues[residue] = permuted_smiles
+             self._residues[residue + '*'] = permuted_smiles.rstrip('O')
+
+         residue_keys = sorted(self._residues.keys())
+         residue_values = range(len(residue_keys))
+         residue_oov_value = np.where(np.array(residue_keys) == "G")[0][0]
+
+         self.mapping = tf.lookup.StaticHashTable(
+             tf.lookup.KeyValueTensorInitializer(
+                 keys=residue_keys,
+                 values=residue_values
+             ),
+             default_value=residue_oov_value,
+         )
+
+         self.graph = tf.stack([
+             self.featurizer(self._residues[r]) for r in residue_keys
+         ], axis=0)
+
+         zeros = tf.zeros((residue_values[-1] + 1, self.embedding_dim))
+         self.cached_embeddings = tf.Variable(initial_value=zeros)
+         _ = self.cache_and_get_embeddings()
+
+     def build(self, input_shape) -> None:
+         self.residues = self._residues
+         super().build(input_shape)
+
+     def call(self, sequences: tf.Tensor, training: bool = None) -> tf.Tensor:
+         if training is False:
+             self.use_cached_embeddings.assign(True)
+         else:
+             self.use_cached_embeddings.assign(False)
+         embeddings = tf.cond(
+             pred=self.use_cached_embeddings,
+             true_fn=lambda: self.cached_embeddings,
+             false_fn=lambda: self.cache_and_get_embeddings(),
+         )
+         sequences = self.ragged_split(sequences)
+         sequences = keras.ops.concatenate([
+             tf.strings.join([sequences[:, :-1], '*']), sequences[:, -1:]
+         ], axis=1)
+         indices = self.mapping.lookup(sequences)
+         return tf.gather(embeddings, indices).to_tensor()
+
+     def cache_and_get_embeddings(self) -> tf.Tensor:
+         embeddings = self.embedder(self.graph)
+         self.cached_embeddings.assign(embeddings)
+         return embeddings
+
+     def compute_mask(
+         self,
+         inputs: tensors.GraphTensor,
+         mask: bool | None = None
+     ) -> tf.Tensor | None:
+         sequences = self.split(inputs)
+         return keras.ops.not_equal(sequences, '')
+
+     def get_config(self) -> dict:
+         config = super().get_config()
+         config.update({
+             'featurizer': keras.saving.serialize_keras_object(
+                 self.featurizer
+             ),
+             'embedder': keras.saving.serialize_keras_object(
+                 self.embedder
+             ),
+             'residues': self._residues,
+         })
+         return config
+
+     @classmethod
+     def from_config(cls, config: dict) -> 'ResidueEmbedding':
+         config['featurizer'] = keras.saving.deserialize_keras_object(
+             config['featurizer']
+         )
+         config['embedder'] = keras.saving.deserialize_keras_object(
+             config['embedder']
+         )
+         return super().from_config(config)
+
+
+ @keras.saving.register_keras_serializable(package='proteomics')
+ class SequenceSplitter(keras.layers.Layer):
+
+     def __init__(self, pad: bool, **kwargs):
+         super().__init__(**kwargs)
+         self.pad = pad
+
+     def call(self, inputs: tf.Tensor) -> tf.Tensor | tf.RaggedTensor:
+         inputs = tf_text.regex_split(inputs, residue_pattern, residue_pattern)
+         if self.pad:
+             inputs = inputs.to_tensor()
+         return inputs
+
+
+ # registered_residues: dict[str, str] = {}
+ # register_residues(default_residues)
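
A small illustration of how the module-level residue_pattern above tokenizes a modified peptide string. Sketch only: the modification names are invented, and importing the module assumes the optional tensorflow_text dependency is installed, since it is imported at the top of the file.

    import re
    from molcraft.applications import proteomics

    sequence = '[Acetyl]-AC[Carbamidomethyl]K'   # hypothetical N-terminal and side-chain mods
    tokens = [m.group(0) for m in re.finditer(proteomics.residue_pattern, sequence)]
    print(tokens)   # ['[Acetyl]-A', 'C[Carbamidomethyl]', 'K']

Because the combined patterns are listed before the simpler ones in the alternation, a residue carrying both an N-terminal and a side-chain modification is matched as a single token rather than being split in two.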

{molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/chem.py

@@ -19,8 +19,6 @@ class Mol(Chem.Mol):
      @classmethod
      def from_encoding(cls, encoding: str, explicit_hs: bool = False, **kwargs) -> 'Mol':
          rdkit_mol = get_mol(encoding, **kwargs)
-         if not rdkit_mol:
-             return None
          if explicit_hs:
              rdkit_mol = Chem.AddHs(rdkit_mol)
          rdkit_mol.__class__ = cls
@@ -102,21 +100,13 @@ class Mol(Chem.Mol):

      def get_conformer(self, index: int = 0) -> 'Conformer':
          if self.num_conformers == 0:
-             warnings.warn(
-                 'Molecule has no conformer. To embed conformer(s), invoke the `embed` method, '
-                 'and optionally followed by `minimize()` to perform force field minimization.',
-                 stacklevel=2
-             )
+             warnings.warn('Molecule has no conformer.')
              return None
          return Conformer.cast(self.GetConformer(index))

      def get_conformers(self) -> list['Conformer']:
          if self.num_conformers == 0:
-             warnings.warn(
-                 'Molecule has no conformers. To embed conformers, invoke the `embed` method, '
-                 'and optionally followed by `minimize()` to perform force field minimization.',
-                 stacklevel=2
-             )
+             warnings.warn('Molecule has no conformer.')
              return []
          return [Conformer.cast(x) for x in self.GetConformers()]

@@ -222,11 +212,10 @@ def get_mol(
      else:
          mol = Chem.MolFromSmiles(encoding, sanitize=False)
      if mol is not None:
-         return sanitize_mol(mol, strict, assign_stereo_chemistry)
-     raise ValueError(
-         f"{encoding} is invalid; "
-         f"make sure {encoding} is a valid SMILES or InChI string."
-     )
+         mol = sanitize_mol(mol, strict, assign_stereo_chemistry)
+         if mol is not None:
+             return mol
+     raise ValueError(f'Could not obtain `chem.Mol` from {encoding}.')

  def get_adjacency_matrix(
      mol: Chem.Mol,
@@ -402,8 +391,9 @@ def embed_conformers(
      mol: Mol,
      num_conformers: int,
      method: str = 'ETKDGv3',
+     random_seed: int | None = None,
      **kwargs
- ) -> None:
+ ) -> Mol:
      available_embedding_methods = {
          'ETDG': rdDistGeom.ETDG(),
          'ETKDG': rdDistGeom.ETKDG(),
@@ -423,6 +413,9 @@
      for key, value in kwargs.items():
          setattr(embedding_method, key, value)

+     if random_seed is not None:
+         embedding_method.randomSeed = random_seed
+
      success = rdDistGeom.EmbedMultipleConfs(
          mol, numConfs=num_conformers, params=embedding_method
      )
@@ -440,6 +433,8 @@
      fallback_embedding_method.useRandomCoords = True
      fallback_embedding_method.maxAttempts = max_attempts
      fallback_embedding_method.clearConfs = False
+     if random_seed is not None:
+         fallback_embedding_method.randomSeed = random_seed
      success = rdDistGeom.EmbedMultipleConfs(
          mol, numConfs=(num_conformers - num_successes), params=fallback_embedding_method
      )
@@ -459,7 +454,7 @@ def optimize_conformers(
      num_threads: bool = 1,
      ignore_interfragment_interactions: bool = True,
      vdw_threshold: float = 10.0,
- ):
+ ) -> Mol:
      available_force_field_methods = [
          'MMFF', 'MMFF94', 'MMFF94s', 'UFF'
      ]
@@ -502,7 +497,7 @@ def prune_conformers(
      keep: int = 1,
      threshold: float = 0.0,
      energy_force_field: str = 'UFF',
- ):
+ ) -> Mol:
      if mol.num_conformers == 0:
          warnings.warn(
              'Molecule has no conformers. To embed conformers, invoke the `embed` method, '
@@ -539,7 +534,7 @@ def _uff_optimize_conformers(
      vdw_threshold: float = 10.0,
      ignore_interfragment_interactions: bool = True,
      **kwargs,
- ) -> Mol:
+ ) -> tuple[list[float], list[bool]]:
      """Universal Force Field Minimization.
      """
      results = rdForceFieldHelpers.UFFOptimizeMoleculeConfs(
@@ -560,7 +555,7 @@ def _mmff_optimize_conformers(
      variant: str = 'MMFF94',
      ignore_interfragment_interactions: bool = True,
      **kwargs,
- ) -> Mol:
+ ) -> tuple[list[float], list[bool]]:
      """Merck Molecular Force Field Minimization.
      """
      if not rdForceFieldHelpers.MMFFHasAllMoleculeParams(mol):
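
The new random_seed argument is applied to both the primary and the fallback embedding parameters above. A minimal usage sketch, with an arbitrary molecule and seed, assuming embed_conformers both mutates and returns the Mol (inferred from the updated -> Mol annotation):

    from molcraft import chem

    mol = chem.Mol.from_encoding('CCO')   # ethanol, arbitrary example
    mol = chem.embed_conformers(mol, num_conformers=5, random_seed=42)
    conformers = mol.get_conformers()     # warns and returns [] if no conformer was embedded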

{molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/datasets.py

@@ -11,7 +11,7 @@ def split(
      test_size: float | None = None,
      groups: str | np.ndarray = None,
      shuffle: bool = False,
-     random_state: int | None = None,
+     random_seed: int | None = None,
  ) -> tuple[np.ndarray | pd.DataFrame, ...]:
      """Splits the dataset into subsets.

@@ -28,7 +28,7 @@
          The groups to perform the splitting on.
      shuffle:
          Whether the dataset should be shuffled prior to splitting.
-     random_state:
+     random_seed:
          The random state/seed. Only applicable if shuffling.
      """
      if not isinstance(data, (pd.DataFrame, np.ndarray)):
@@ -69,7 +69,7 @@
          train_size += remainder

      if shuffle:
-         np.random.seed(random_state)
+         np.random.seed(random_seed)
          np.random.shuffle(indices)

      train_mask = np.isin(groups, indices[:train_size])
@@ -84,7 +84,7 @@ def cv_split(
      num_splits: int = 10,
      groups: str | np.ndarray = None,
      shuffle: bool = False,
-     random_state: int | None = None,
+     random_seed: int | None = None,
  ) -> typing.Iterator[
      tuple[np.ndarray | pd.DataFrame, np.ndarray | pd.DataFrame]
  ]:
@@ -99,7 +99,7 @@
          The groups to perform the splitting on.
      shuffle:
          Whether the dataset should be shuffled prior to splitting.
-     random_state:
+     random_seed:
          The random state/seed. Only applicable if shuffling.
      """
      if not isinstance(data, (pd.DataFrame, np.ndarray)):
@@ -119,7 +119,7 @@
          f'the data size or the number of groups ({size}).'
      )
      if shuffle:
-         np.random.seed(random_state)
+         np.random.seed(random_seed)
          np.random.shuffle(indices)

      indices_splits = np.array_split(indices, num_splits)
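
Callers that previously passed random_state to split or cv_split now pass random_seed. A minimal sketch, assuming a pandas DataFrame input (allowed by the type hints above) and that a lone test_size yields a (train, test) pair:

    import pandas as pd
    from molcraft import datasets

    df = pd.DataFrame({'smiles': ['CCO', 'CCN', 'CCC', 'CCS'], 'label': [1.0, 0.5, 0.2, 0.9]})
    train, test = datasets.split(df, test_size=0.25, shuffle=True, random_seed=42)   # assumed return shape
    for train_fold, test_fold in datasets.cv_split(df, num_splits=2, shuffle=True, random_seed=42):
        print(len(train_fold), len(test_fold))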

{molcraft-0.1.0a16 → molcraft-0.1.0a18}/molcraft/descriptors.py

@@ -91,3 +91,17 @@ class NumRings(Descriptor):
      def call(self, mol: chem.Mol) -> np.ndarray:
          return rdMolDescriptors.CalcNumRings(mol)

+
+ @keras.saving.register_keras_serializable(package='molcraft')
+ class AtomCount(Descriptor):
+
+     def __init__(self, atom_type: str, **kwargs):
+         super().__init__(**kwargs)
+         self.atom_type = atom_type
+
+     def call(self, mol: chem.Mol) -> np.ndarray:
+         count = 0
+         for atom in mol.atoms:
+             if atom.GetSymbol() == self.atom_type:
+                 count += 1
+         return count
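
A quick sketch of the new AtomCount descriptor. The call invocation mirrors the method defined above; whether Descriptor instances are also directly callable is not shown in this diff.

    from molcraft import chem, descriptors

    mol = chem.Mol.from_encoding('N[C@@H](CS)C(=O)O')   # cysteine, borrowed from the README example
    sulfur_count = descriptors.AtomCount(atom_type='S').call(mol)
    print(sulfur_count)   # 1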