molcraft 0.1.0a16__py3-none-any.whl → 0.1.0a18__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of molcraft might be problematic.

molcraft/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = '0.1.0a16'
+ __version__ = '0.1.0a18'

  import os
  os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
@@ -6,7 +6,6 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
  from molcraft import chem
  from molcraft import features
  from molcraft import descriptors
- from molcraft import conformers
  from molcraft import featurizers
  from molcraft import layers
  from molcraft import models
@@ -15,4 +14,6 @@ from molcraft import records
  from molcraft import tensors
  from molcraft import callbacks
  from molcraft import datasets
- from molcraft import losses
+ from molcraft import losses
+
+ from molcraft.applications import proteomics
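A minimal sketch of how the upgraded package surface might be exercised; the attribute access below is an assumption based only on the imports visible in this diff:

import molcraft
from molcraft.applications import proteomics  # application module newly exposed at the top level

print(molcraft.__version__)  # expected: '0.1.0a18'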
File without changes
molcraft/applications/proteomics.py CHANGED
@@ -3,7 +3,7 @@ import keras
  import numpy as np
  import tensorflow as tf
  import tensorflow_text as tf_text
- import json
+ from rdkit import Chem

  from molcraft import featurizers
  from molcraft import tensors
@@ -11,16 +11,47 @@ from molcraft import layers
  from molcraft import models
  from molcraft import chem

+ """
+
+
+
+
+
+
+ No need to correct smiles for modeling, only for interpretation.
+
+ Use added smiles data to rearrange list of saliency values.
+
+
+
+
+
+
+
+
+
+
+
+
+ """

  # TODO: Add regex pattern for residue (C-term mod + N-term mod)?
  # TODO: Add regex pattern for residue (C-term mod + N-term mod + mod)?
+
+ no_mod_pattern = r'([A-Z])'
+ side_chain_mod_pattern = r'([A-Z]\[[A-Za-z0-9]+\])'
+ n_term_mod_pattern = r'(\[[A-Za-z0-9]+\]-[A-Z])'
+ c_term_mod_pattern = r'([A-Z]-\[[A-Za-z0-9]+\])'
+ side_chain_and_n_term_mod_pattern = r'(\[[A-Za-z0-9]+\]-[A-Z]\[[A-Za-z0-9]+\])'
+ side_chain_and_c_term_mod_pattern = r'([A-Z]\[[A-Za-z0-9]+\]-\[[A-Za-z0-9]+\])'
+
  residue_pattern: str = "|".join([
-     r'(\[[A-Za-z0-9]+\]-[A-Z]\[[A-Za-z0-9]+\])',  # residue (N-term mod + mod)
-     r'([A-Z]\[[A-Za-z0-9]+\]-\[[A-Za-z0-9]+\])',  # residue (C-term mod + mod)
-     r'([A-Z]-\[[A-Za-z0-9]+\])',  # residue (C-term mod)
-     r'(\[[A-Za-z0-9]+\]-[A-Z])',  # residue (N-term mod)
-     r'([A-Z]\[[A-Za-z0-9]+\])',  # residue (mod)
-     r'([A-Z])',  # residue (no mod)
+     side_chain_and_n_term_mod_pattern,
+     side_chain_and_c_term_mod_pattern,
+     n_term_mod_pattern,
+     c_term_mod_pattern,
+     side_chain_mod_pattern,
+     no_mod_pattern
  ])

  default_residues: dict[str, str] = {
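A minimal sketch of how the alternation above tokenizes a modified peptide string (longer patterns come first so that modified residues are matched before their unmodified prefixes); the sequence and modification names are illustrative placeholders, not values from the package:

import re

sequence = '[Acetyl]-PEPC[Carbamidomethyl]IDE'
residues = [m.group(0) for m in re.finditer(residue_pattern, sequence)]
# -> ['[Acetyl]-P', 'E', 'P', 'C[Carbamidomethyl]', 'I', 'D', 'E']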
@@ -46,7 +77,28 @@ default_residues: dict[str, str] = {
      "Y": "N[C@@H](Cc1ccc(O)cc1)C(=O)O",
  }

+ def has_c_terminal_mod(residue: str):
+     if re.search(c_term_mod_pattern, residue):
+         return True
+     return False
+
+ def has_n_terminal_mod(residue: str):
+     if re.search(n_term_mod_pattern, residue):
+         return True
+     return False

+ # def register_residues(residues: dict[str, str]) -> None:
+ #     for residue, smiles in residues.items():
+ #         if residue.startswith('P'):
+ #             smiles.startswith('N'), f'Incorrect SMILES permutation for {residue}.'
+ #         elif not residue.startswith('['):
+ #             smiles.startswith('N[C@@H]'), f'Incorrect SMILES permutation for {residue}.'
+ #         if len(residue) > 1 and not residue[1] == "-":
+ #             assert smiles.endswith('C(=O)O'), f'Incorrect SMILES permutation for {residue}.'
+ #         registered_residues[residue] = smiles
+ #         registered_residues[residue + '*'] = smiles.strip('O')
+
+
  class Peptide(chem.Mol):

      @classmethod
@@ -65,6 +117,22 @@ class Peptide(chem.Mol):
          return super().from_encoding(peptide_smiles, **kwargs)


+ def permute_residue_smiles(smiles: str) -> str:
+     glycine = chem.Mol.from_encoding("NCC(=O)O")
+     mol = chem.Mol.from_encoding(smiles)
+     nitrogen_index = mol.GetSubstructMatch(glycine)[0]
+     permuted_smiles = Chem.MolToSmiles(
+         mol, rootedAtAtom=nitrogen_index
+     )
+     return permuted_smiles
+
+ def check_peptide_residue_smiles(smiles: list[str]) -> bool:
+     backbone = 'NCC(=O)' * (len(smiles) - 1) + 'NC'
+     backbone = chem.Mol.from_encoding(backbone)
+     mol = chem.Mol.from_encoding(''.join(smiles))
+     is_valid = mol.HasSubstructMatch(backbone)
+     return is_valid
+
  @keras.saving.register_keras_serializable(package='proteomics')
  class ResidueEmbedding(keras.layers.Layer):

@@ -72,40 +140,69 @@ class ResidueEmbedding(keras.layers.Layer):
          self,
          featurizer: featurizers.MolGraphFeaturizer,
          embedder: models.GraphModel,
+         residues: dict[str, str] | None = None,
          **kwargs
      ) -> None:
-         residues = kwargs.pop('_residues', None)
          super().__init__(**kwargs)
          if residues is None:
-             residues = registered_residues.copy()
-         self._residues = residues
+             residues = {}
          self.embedder = embedder
          self.featurizer = featurizer
+         self.embedding_dim = int(self.embedder.output.shape[-1])
          self.ragged_split = SequenceSplitter(pad=False)
          self.split = SequenceSplitter(pad=True)
+         self.use_cached_embeddings = tf.Variable(False)
+         self.residues = residues
          self.supports_masking = True

-     def build(self, input_shape) -> None:
-         embedding_dim = self.embedder.output.shape[-1]
-         residues = sorted(self._residues.keys())
-         smiles = [self._residues[residue] for residue in residues]
-         num_residues = len(residues)
-         self.oov_index = np.where(np.array(residues) == "G")[0][0]
+     @property
+     def residues(self) -> dict[str, str]:
+         return self._residues
+
+     @residues.setter
+     def residues(self, residues: dict[str, str]) -> None:
+
+         residues = {**default_residues, **residues}
+         self._residues = {}
+         for residue, smiles in residues.items():
+             permuted_smiles = permute_residue_smiles(smiles)
+             # Returned smiles should begin with the amino group.
+             # It seems that the returned smiles ends with carboxyl group,
+             # though we do another check just in case.
+             if not has_c_terminal_mod(residue):
+                 carboxyl_group = 'C(=O)O'
+                 if not permuted_smiles.endswith(carboxyl_group):
+                     raise ValueError(
+                         f'Unsupported permutation of {residue!r} smiles: {permuted_smiles!r}.'
+                     )
+             self._residues[residue] = permuted_smiles
+             self._residues[residue + '*'] = permuted_smiles.rstrip('O')
+
+         residue_keys = sorted(self._residues.keys())
+         residue_values = range(len(residue_keys))
+         residue_oov_value = np.where(np.array(residue_keys) == "G")[0][0]
+
          self.mapping = tf.lookup.StaticHashTable(
              tf.lookup.KeyValueTensorInitializer(
-                 keys=residues,
-                 values=range(num_residues)
+                 keys=residue_keys,
+                 values=residue_values
              ),
-             default_value=-1,
+             default_value=residue_oov_value,
          )
-         self.graph = tf.stack([self.featurizer(s) for s in smiles], axis=0)
-         self.cached_embeddings = tf.Variable(
-             initial_value=tf.zeros((num_residues, embedding_dim))
-         )
-         self.use_cached_embeddings = tf.Variable(False)
+
+         self.graph = tf.stack([
+             self.featurizer(self._residues[r]) for r in residue_keys
+         ], axis=0)
+
+         zeros = tf.zeros((residue_values[-1] + 1, self.embedding_dim))
+         self.cached_embeddings = tf.Variable(initial_value=zeros)
+         _ = self.cache_and_get_embeddings()
+
+     def build(self, input_shape) -> None:
+         self.residues = self._residues
          super().build(input_shape)

-     def call(self, sequences, training=None) -> tensors.GraphTensor:
+     def call(self, sequences: tf.Tensor, training: bool = None) -> tf.Tensor:
          if training is False:
              self.use_cached_embeddings.assign(True)
          else:
@@ -113,17 +210,16 @@ class ResidueEmbedding(keras.layers.Layer):
          embeddings = tf.cond(
              pred=self.use_cached_embeddings,
              true_fn=lambda: self.cached_embeddings,
-             false_fn=lambda: self.embeddings(),
+             false_fn=lambda: self.cache_and_get_embeddings(),
          )
          sequences = self.ragged_split(sequences)
          sequences = keras.ops.concatenate([
              tf.strings.join([sequences[:, :-1], '*']), sequences[:, -1:]
          ], axis=1)
          indices = self.mapping.lookup(sequences)
-         indices = keras.ops.where(indices == -1, self.oov_index, indices)
          return tf.gather(embeddings, indices).to_tensor()

-     def embeddings(self) -> tf.Tensor:
+     def cache_and_get_embeddings(self) -> tf.Tensor:
          embeddings = self.embedder(self.graph)
          self.cached_embeddings.assign(embeddings)
          return embeddings
@@ -139,101 +235,40 @@ class ResidueEmbedding(keras.layers.Layer):
      def get_config(self) -> dict:
          config = super().get_config()
          config.update({
-             '_residues': self._residues,
-             'featurizer': keras.saving.serialize_keras_object(self.featurizer),
-             'embedder': keras.saving.serialize_keras_object(self.embedder)
+             'featurizer': keras.saving.serialize_keras_object(
+                 self.featurizer
+             ),
+             'embedder': keras.saving.serialize_keras_object(
+                 self.embedder
+             ),
+             'residues': self._residues,
          })
          return config

      @classmethod
      def from_config(cls, config: dict) -> 'ResidueEmbedding':
-         config['featurizer'] = keras.saving.deserialize_keras_object(config['featurizer'])
-         config['embedder'] = keras.saving.deserialize_keras_object(config['embedder'])
+         config['featurizer'] = keras.saving.deserialize_keras_object(
+             config['featurizer']
+         )
+         config['embedder'] = keras.saving.deserialize_keras_object(
+             config['embedder']
+         )
          return super().from_config(config)


  @keras.saving.register_keras_serializable(package='proteomics')
- class SequenceSplitter(keras.layers.Layer):
+ class SequenceSplitter(keras.layers.Layer):

      def __init__(self, pad: bool, **kwargs):
          super().__init__(**kwargs)
          self.pad = pad

-     def call(self, inputs):
+     def call(self, inputs: tf.Tensor) -> tf.Tensor | tf.RaggedTensor:
          inputs = tf_text.regex_split(inputs, residue_pattern, residue_pattern)
          if self.pad:
              inputs = inputs.to_tensor()
          return inputs


- def interpret(model: keras.models.Model, sequence: list[str]) -> tensors.GraphTensor:
-
-     if not tf.is_tensor(sequence):
-         sequence = keras.ops.convert_to_tensor(sequence)
-
-     # Find embedding layer
-     for layer in model.layers:
-         if isinstance(layer, ResidueEmbedding):
-             break
-
-     # Use embedding layer to convert the sequence to a graph
-     residues = layer.ragged_split(sequence)
-     residues = keras.ops.concatenate([
-         tf.strings.join([residues[:, :-1], '*']), residues[:, -1:]
-     ], axis=1)
-     indices = layer.mapping.lookup(residues)
-     graph = tf.concat([
-         layer.graph[residue_ids] for residue_ids in indices
-     ], axis=0)
-
-     # Define layer which reshapes data into sequences of residue embeddings
-     num_residues = indices.row_lengths()
-     to_sequence = (
-         lambda x: tf.RaggedTensor.from_row_lengths(x, num_residues).to_tensor()
-     )
-     reshape = keras.layers.Lambda(to_sequence)
-
-     # Obtain the embedder part of the original model
-     embedder = layer.embedder
-     # Obtain the remaining part of the original model
-     predictor = keras.models.Model(embedder.output, model.output)
-     # Obtain an 'interpretable model', based on the original model
-     inputs = layers.Input(graph.spec)
-     x = inputs
-     for layer in embedder.layers:  # Loop over layers to expose them
-         x = layer(x)
-     x = reshape(x)
-     outputs = predictor(x)
-     interpretable_model = models.GraphModel(inputs, outputs)
-
-     # Interpret original model through the 'interpretable model'
-     graph = models.interpret(interpretable_model, graph)
-     del interpretable_model
-
-     # Update 'size' field with new sizes corresponding to peptides for convenience
-     # Allows the user to obtain n:th peptide graph using indexing: nth_peptide = graph[n]
-     peptide_indices = range(len(num_residues))
-     peptide_indicator = keras.ops.repeat(peptide_indices, num_residues)
-     residue_sizes = graph.context['size']
-     peptide_sizes = keras.ops.segment_sum(residue_sizes, peptide_indicator)
-     return graph.update({'context': {'size': peptide_sizes, 'sequence': sequence}})
-
-
- def register_residues(residues: dict[str, str]) -> None:
-     # TODO: Implement functions that check if residue has N- or C-terminal mod
-     # if C-terminal mod, no need to enforce concatenatable perm.
-     # if N-terminal mod, enforce only 'C(=O)O'
-     # if normal mod, enforce concatenateable perm ('N[C@@H]' and 'C(=O)O)).
-     for residue, smiles in residues.items():
-         if residue.startswith('P'):
-             smiles.startswith('N'), f'Incorrect SMILES permutation for {residue}.'
-         elif not residue.startswith('['):
-             smiles.startswith('N[C@@H]'), f'Incorrect SMILES permutation for {residue}.'
-         if len(residue) > 1 and not residue[1] == "-":
-             assert smiles.endswith('C(=O)O'), f'Incorrect SMILES permutation for {residue}.'
-         registered_residues[residue] = smiles
-         registered_residues[residue + '*'] = smiles.strip('O')
-
-
- registered_residues: dict[str, str] = {}
- register_residues(default_residues)
+ # registered_residues: dict[str, str] = {}
+ # register_residues(default_residues)
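A minimal usage sketch of the module-level helpers added in this file, assuming RDKit is installed; the example SMILES and the commented results are illustrative, not taken from the package:

# Re-root an amino-acid SMILES at its backbone nitrogen so residues can be
# concatenated head-to-tail (alanine written here with the amino group last).
permute_residue_smiles('C[C@@H](C(=O)O)N')
# roughly 'N[C@@H](C)C(=O)O'

# Verify that joined residue SMILES still contain the expected peptide backbone
# (alanyl-glycine; the first residue has its terminal 'O' stripped, as the '*' entries do).
check_peptide_residue_smiles(['N[C@@H](C)C(=O)', 'NCC(=O)O'])
# True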
molcraft/chem.py CHANGED
@@ -19,8 +19,6 @@ class Mol(Chem.Mol):
      @classmethod
      def from_encoding(cls, encoding: str, explicit_hs: bool = False, **kwargs) -> 'Mol':
          rdkit_mol = get_mol(encoding, **kwargs)
-         if not rdkit_mol:
-             return None
          if explicit_hs:
              rdkit_mol = Chem.AddHs(rdkit_mol)
          rdkit_mol.__class__ = cls
@@ -102,21 +100,13 @@

      def get_conformer(self, index: int = 0) -> 'Conformer':
          if self.num_conformers == 0:
-             warnings.warn(
-                 'Molecule has no conformer. To embed conformer(s), invoke the `embed` method, '
-                 'and optionally followed by `minimize()` to perform force field minimization.',
-                 stacklevel=2
-             )
+             warnings.warn('Molecule has no conformer.')
              return None
          return Conformer.cast(self.GetConformer(index))

      def get_conformers(self) -> list['Conformer']:
          if self.num_conformers == 0:
-             warnings.warn(
-                 'Molecule has no conformers. To embed conformers, invoke the `embed` method, '
-                 'and optionally followed by `minimize()` to perform force field minimization.',
-                 stacklevel=2
-             )
+             warnings.warn('Molecule has no conformer.')
              return []
          return [Conformer.cast(x) for x in self.GetConformers()]

@@ -222,11 +212,10 @@ def get_mol(
      else:
          mol = Chem.MolFromSmiles(encoding, sanitize=False)
      if mol is not None:
-         return sanitize_mol(mol, strict, assign_stereo_chemistry)
-     raise ValueError(
-         f"{encoding} is invalid; "
-         f"make sure {encoding} is a valid SMILES or InChI string."
-     )
+         mol = sanitize_mol(mol, strict, assign_stereo_chemistry)
+         if mol is not None:
+             return mol
+     raise ValueError(f'Could not obtain `chem.Mol` from {encoding}.')

  def get_adjacency_matrix(
      mol: Chem.Mol,
@@ -402,8 +391,9 @@ def embed_conformers(
      mol: Mol,
      num_conformers: int,
      method: str = 'ETKDGv3',
+     random_seed: int | None = None,
      **kwargs
- ) -> None:
+ ) -> Mol:
      available_embedding_methods = {
          'ETDG': rdDistGeom.ETDG(),
          'ETKDG': rdDistGeom.ETKDG(),
@@ -423,6 +413,9 @@
      for key, value in kwargs.items():
          setattr(embedding_method, key, value)

+     if random_seed is not None:
+         embedding_method.randomSeed = random_seed
+
      success = rdDistGeom.EmbedMultipleConfs(
          mol, numConfs=num_conformers, params=embedding_method
      )
@@ -440,6 +433,8 @@
          fallback_embedding_method.useRandomCoords = True
          fallback_embedding_method.maxAttempts = max_attempts
          fallback_embedding_method.clearConfs = False
+         if random_seed is not None:
+             fallback_embedding_method.randomSeed = random_seed
          success = rdDistGeom.EmbedMultipleConfs(
              mol, numConfs=(num_conformers - num_successes), params=fallback_embedding_method
          )
@@ -459,7 +454,7 @@ def optimize_conformers(
      num_threads: bool = 1,
      ignore_interfragment_interactions: bool = True,
      vdw_threshold: float = 10.0,
- ):
+ ) -> Mol:
      available_force_field_methods = [
          'MMFF', 'MMFF94', 'MMFF94s', 'UFF'
      ]
@@ -502,7 +497,7 @@ def prune_conformers(
      keep: int = 1,
      threshold: float = 0.0,
      energy_force_field: str = 'UFF',
- ):
+ ) -> Mol:
      if mol.num_conformers == 0:
          warnings.warn(
              'Molecule has no conformers. To embed conformers, invoke the `embed` method, '
@@ -539,7 +534,7 @@ def _uff_optimize_conformers(
      vdw_threshold: float = 10.0,
      ignore_interfragment_interactions: bool = True,
      **kwargs,
- ) -> Mol:
+ ) -> tuple[list[float], list[bool]]:
      """Universal Force Field Minimization.
      """
      results = rdForceFieldHelpers.UFFOptimizeMoleculeConfs(
@@ -560,7 +555,7 @@ def _mmff_optimize_conformers(
      variant: str = 'MMFF94',
      ignore_interfragment_interactions: bool = True,
      **kwargs,
- ) -> Mol:
+ ) -> tuple[list[float], list[bool]]:
      """Merck Molecular Force Field Minimization.
      """
      if not rdForceFieldHelpers.MMFFHasAllMoleculeParams(mol):
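A minimal sketch of the reproducible-embedding path added above; calling the module-level functions like this is an assumption based only on the signatures visible in the diff:

from molcraft import chem

mol = chem.Mol.from_encoding('CCO', explicit_hs=True)
# random_seed is forwarded to the embedding parameters (and to the fallback
# parameters), so repeated runs should yield the same conformers.
mol = chem.embed_conformers(mol, num_conformers=5, method='ETKDGv3', random_seed=42)
conformer = mol.get_conformer()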
molcraft/datasets.py CHANGED
@@ -11,7 +11,7 @@ def split(
      test_size: float | None = None,
      groups: str | np.ndarray = None,
      shuffle: bool = False,
-     random_state: int | None = None,
+     random_seed: int | None = None,
  ) -> tuple[np.ndarray | pd.DataFrame, ...]:
      """Splits the dataset into subsets.

@@ -28,7 +28,7 @@ def split(
              The groups to perform the splitting on.
          shuffle:
              Whether the dataset should be shuffled prior to splitting.
-         random_state:
+         random_seed:
              The random state/seed. Only applicable if shuffling.
      """
      if not isinstance(data, (pd.DataFrame, np.ndarray)):
@@ -69,7 +69,7 @@ def split(
          train_size += remainder

      if shuffle:
-         np.random.seed(random_state)
+         np.random.seed(random_seed)
          np.random.shuffle(indices)

      train_mask = np.isin(groups, indices[:train_size])
@@ -84,7 +84,7 @@ def cv_split(
      num_splits: int = 10,
      groups: str | np.ndarray = None,
      shuffle: bool = False,
-     random_state: int | None = None,
+     random_seed: int | None = None,
  ) -> typing.Iterator[
      tuple[np.ndarray | pd.DataFrame, np.ndarray | pd.DataFrame]
  ]:
@@ -99,7 +99,7 @@ def cv_split(
              The groups to perform the splitting on.
          shuffle:
              Whether the dataset should be shuffled prior to splitting.
-         random_state:
+         random_seed:
              The random state/seed. Only applicable if shuffling.
      """
      if not isinstance(data, (pd.DataFrame, np.ndarray)):
@@ -119,7 +119,7 @@ def cv_split(
              f'the data size or the number of groups ({size}).'
          )
      if shuffle:
-         np.random.seed(random_state)
+         np.random.seed(random_seed)
          np.random.shuffle(indices)

      indices_splits = np.array_split(indices, num_splits)
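A minimal usage sketch reflecting the random_state → random_seed rename; the DataFrame and the keyword choices other than random_seed are illustrative assumptions:

import pandas as pd
from molcraft import datasets

df = pd.DataFrame({'smiles': ['CCO', 'CCN', 'CCC', 'CCCl'], 'y': [0.1, 0.4, 0.3, 0.9]})
subsets = datasets.split(df, test_size=0.25, shuffle=True, random_seed=42)
for train_df, test_df in datasets.cv_split(df, num_splits=2, shuffle=True, random_seed=42):
    pass  # each iteration yields one train/test fold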
molcraft/descriptors.py CHANGED
@@ -91,3 +91,17 @@ class NumRings(Descriptor):
      def call(self, mol: chem.Mol) -> np.ndarray:
          return rdMolDescriptors.CalcNumRings(mol)

+
+ @keras.saving.register_keras_serializable(package='molcraft')
+ class AtomCount(Descriptor):
+
+     def __init__(self, atom_type: str, **kwargs):
+         super().__init__(**kwargs)
+         self.atom_type = atom_type
+
+     def call(self, mol: chem.Mol) -> np.ndarray:
+         count = 0
+         for atom in mol.atoms:
+             if atom.GetSymbol() == self.atom_type:
+                 count += 1
+         return count
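A minimal sketch of the new AtomCount descriptor; calling call() directly keeps the example independent of how the Descriptor base class dispatches __call__:

from molcraft import chem, descriptors

descriptor = descriptors.AtomCount(atom_type='O')
descriptor.call(chem.Mol.from_encoding('CC(=O)O'))  # 2 oxygen atoms in acetic acid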
molcraft/features.py CHANGED
@@ -41,11 +41,7 @@ class Feature(abc.ABC):

      def __call__(self, mol: chem.Mol) -> np.ndarray:
          if not isinstance(mol, chem.Mol):
-             raise ValueError(
-                 f'Input to {self.name} needs to be a `chem.Mol`, which '
-                 'implements two properties that should be iterated over '
-                 'to compute features: `atoms` and `bonds`.'
-             )
+             raise TypeError(f'Input to {self.name} must be a `chem.Mol` instance.')
          features = self.call(mol)
          if len(features) != mol.num_atoms and len(features) != mol.num_bonds:
              raise ValueError(
@@ -119,59 +115,6 @@ class Feature(abc.ABC):
          return np.asarray([value], dtype=self.dtype)


- @keras.saving.register_keras_serializable(package='molcraft')
- class EdgeFeature(Feature):
-
-     def __call__(self, mol: chem.Mol) -> np.ndarray:
-         if not isinstance(mol, chem.Mol):
-             raise ValueError(
-                 f'Input to {self.name} needs to be a `chem.Mol`, which '
-                 'implements two properties that should be iterated over '
-                 'to compute features: `atoms` and `bonds`.'
-             )
-         features = self.call(mol)
-         if len(features) != int(mol.num_atoms**2):
-             raise ValueError(
-                 f'The number of features computed by {self.name} does not '
-                 'match the number of node pairs in the `chem.Mol` object. '
-                 f'Make sure the list of items returned by {self.name}(input) '
-                 'correspond to node/atom pairs: '
-                 '[(0, 0), (0, 1), ..., (0, N), (1, 0), ... (N, N)], '
-                 'where N denotes the number of nodes/atoms.'
-             )
-         func = (
-             self._featurize_categorical if self.vocab else
-             self._featurize_floating
-         )
-         return np.asarray([func(x) for x in features], dtype=self.dtype)
-
-
- @keras.saving.register_keras_serializable(package='molcraft')
- class Distance(EdgeFeature):
-
-     def __init__(
-         self,
-         max_distance: int = None,
-         allow_oov: int = True,
-         encode_oov: bool = True,
-         **kwargs,
-     ) -> None:
-         vocab = kwargs.pop('vocab', None)
-         if not vocab:
-             if max_distance is None:
-                 max_distance = 20
-             vocab = list(range(max_distance + 1))
-         super().__init__(
-             vocab=vocab,
-             allow_oov=allow_oov,
-             encode_oov=encode_oov,
-             **kwargs
-         )
-
-     def call(self, mol: chem.Mol) -> list[int]:
-         return [int(x) for x in chem.get_distances(mol).reshape(-1)]
-
-
  @keras.saving.register_keras_serializable(package='molcraft')
  class AtomType(Feature):
      def call(self, mol: chem.Mol) -> list[int, float, str]:
@@ -340,6 +283,55 @@ class IsRotatable(Feature):
          return chem.rotatable_bonds(mol)


+ @keras.saving.register_keras_serializable(package='molcraft')
+ class PairFeature(Feature):
+
+     def __call__(self, mol: chem.Mol) -> np.ndarray:
+         if not isinstance(mol, chem.Mol):
+             raise TypeError(f'Input to {self.name} must be a `chem.Mol` instance.')
+         features = self.call(mol)
+         if len(features) != int(mol.num_atoms**2):
+             raise ValueError(
+                 f'The number of features computed by {self.name} does not '
+                 'match the number of node/atom pairs in the `chem.Mol` object. '
+                 f'Make sure the list of items returned by {self.name}(input) '
+                 'correspond to node/atom pairs: '
+                 '[(0, 0), (0, 1), ..., (0, N), (1, 0), ... (N, N)], '
+                 'where N denotes the number of nodes/atoms.'
+             )
+         func = (
+             self._featurize_categorical if self.vocab else
+             self._featurize_floating
+         )
+         return np.asarray([func(x) for x in features], dtype=self.dtype)
+
+
+ @keras.saving.register_keras_serializable(package='molcraft')
+ class PairDistance(PairFeature):
+
+     def __init__(
+         self,
+         max_distance: int = None,
+         allow_oov: int = True,
+         encode_oov: bool = True,
+         **kwargs,
+     ) -> None:
+         vocab = kwargs.pop('vocab', None)
+         if not vocab:
+             if max_distance is None:
+                 max_distance = 10
+             vocab = list(range(max_distance + 1))
+         super().__init__(
+             vocab=vocab,
+             allow_oov=allow_oov,
+             encode_oov=encode_oov,
+             **kwargs
+         )
+
+     def call(self, mol: chem.Mol) -> list[int]:
+         return [int(x) for x in chem.get_distances(mol).reshape(-1)]
+
+
  default_vocabulary = {
      'AtomType': [
          '*', 'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na',
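A minimal sketch of the new pair feature; it produces one feature vector per ordered atom pair, and the exact vector width depends on the vocabulary and OOV settings:

from molcraft import chem, features

feature = features.PairDistance(max_distance=10)
mol = chem.Mol.from_encoding('CCO')
pairwise = feature(mol)  # one row per ordered heavy-atom pair, here 3**2 = 9 rows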