aisp 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aisp/__init__.py CHANGED
@@ -1,4 +1,4 @@
  """Artificial Immune Systems Package."""

  __author__ = "João Paulo da Silva Barros"
- __version__ = "0.1.42"
+ __version__ = "0.3.0"
aisp/base/__init__.py CHANGED
@@ -1,5 +1,7 @@
  """Base class modules."""

  from ._classifier import BaseClassifier
+ from ._clusterer import BaseClusterer
+ from ._base import set_seed_numba

- __all__ = ['BaseClassifier']
+ __all__ = ['BaseClassifier', 'BaseClusterer', 'set_seed_numba']
aisp/base/_base.py ADDED
@@ -0,0 +1,65 @@
+ """Base class for parameter introspection compatible with the scikit-learn API."""
+ import random
+
+ import numpy as np
+ from numba import njit
+
+
+ class Base:
+     """
+     Generic base class for models with a common interface.
+
+     Provides the ``get_params`` and ``set_params`` methods for compatibility with
+     the scikit-learn API, allowing access to the model's public parameters.
+     """
+
+     def set_params(self, **params):
+         """
+         Set the parameters of the instance.
+
+         This method is required to ensure compatibility with scikit-learn functions.
+
+         Parameters
+         ----------
+         **params
+             Parameter names and values, set as attributes on the instance if public and already defined.
+
+         Returns
+         -------
+         self
+         """
+         for key, value in params.items():
+             if not key.startswith("_") and hasattr(self, key):
+                 setattr(self, key, value)
+         return self
+
+     def get_params(self, deep: bool = True) -> dict:  # pylint: disable=W0613
+         """
+         Return a dictionary with the object's main parameters.
+
+         This method is required to ensure compatibility with scikit-learn functions.
+
+         Returns
+         -------
+         dict
+             Dictionary containing the object's attributes that do not start with "_".
+         """
+         return {
+             key: value
+             for key, value in self.__dict__.items()
+             if not key.startswith("_")
+         }
+
+
+ @njit(cache=True)
+ def set_seed_numba(seed: int):
+     """
+     Set the seed for random numbers used by functions compiled with Numba.
+
+     Parameters
+     ----------
+     seed : int
+         Integer value used to initialize Numba's random number generator.
+     """
+     np.random.seed(seed)
+     random.seed(seed)
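
Note: `set_seed_numba` exists because Numba's nopython mode keeps its own random state, so seeding NumPy from interpreted code does not reach `@njit`-compiled functions. A minimal usage sketch of the new helpers (the `Example` class and seed value are illustrative, not part of the package):

    import numpy as np
    from aisp.base import set_seed_numba
    from aisp.base._base import Base  # internal module added in this release

    np.random.seed(42)    # seeds the interpreter-side NumPy generator
    set_seed_numba(42)    # seeds the separate generator used inside @njit code

    class Example(Base):
        def __init__(self, k=3):
            self.k = k
            self._cache = None  # leading underscore: excluded from get_params

    model = Example()
    model.get_params()               # {'k': 3}
    model.set_params(k=5, _cache=1)  # only existing public attributes are set
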
aisp/base/_classifier.py CHANGED
@@ -5,21 +5,21 @@ from typing import Optional, Union

  import numpy.typing as npt

+ from ._base import Base
  from ..utils import slice_index_list_by_class
  from ..utils.metrics import accuracy_score


- class BaseClassifier(ABC):
+ class BaseClassifier(ABC, Base):
      """Base class for classification algorithms.

-     Defines the abstract methods ``fit`` and ``predict``, and implements the ``score``,
-     ``get_params`` method.
+     Defines the abstract methods ``fit`` and ``predict``, and implements the ``score`` method.
      """

-     classes: Optional[Union[npt.NDArray, list]] = None
+     classes: Union[npt.NDArray, list] = []

      @abstractmethod
-     def fit(self, X: npt.NDArray, y: npt.NDArray, verbose: bool = True):
+     def fit(self, X: npt.NDArray, y: npt.NDArray, verbose: bool = True) -> "BaseClassifier":
          """
          Train the model using the input data X and corresponding labels y.

@@ -83,6 +83,10 @@ class BaseClassifier(ABC):
          if len(y) == 0:
              return 0
          y_pred = self.predict(X)
+
+         if y_pred is None:
+             return 0
+
          return accuracy_score(y, y_pred)

      def _slice_index_list_by_class(self, y: npt.NDArray) -> dict:
@@ -102,15 +106,3 @@ class BaseClassifier(ABC):
              A dictionary with the list of array positions(``y``), with the classes as key.
          """
          return slice_index_list_by_class(self.classes, y)
-
-     def get_params(self, deep: bool = True) -> dict:  # pylint: disable=W0613
-         """
-         Return a dictionary with the object's main parameters.
-
-         This method is required to ensure compatibility with scikit-learn functions.
-         """
-         return {
-             key: value
-             for key, value in self.__dict__.items()
-             if not key.startswith("_")
-         }
aisp/base/_clusterer.py ADDED
@@ -0,0 +1,76 @@
+ """Base class for clustering algorithms."""
+
+ from abc import ABC, abstractmethod
+ from typing import Optional
+
+ import numpy.typing as npt
+
+ from ._base import Base
+
+
+ class BaseClusterer(ABC, Base):
+     """Abstract base class for clustering algorithms.
+
+     This class defines the core interface for clustering models. It enforces
+     the implementation of the `fit` and `predict` methods in all derived classes,
+     and provides a default implementation for `fit_predict` and `get_params`.
+     """
+
+     @abstractmethod
+     def fit(self, X: npt.NDArray, verbose: bool = True) -> "BaseClusterer":
+         """
+         Train the model using the input data X.
+
+         This abstract method is implemented by the class that inherits it.
+
+         Parameters
+         ----------
+         X : npt.NDArray
+             Input data used for training the model.
+         verbose : bool, default=True
+             Flag to enable or disable detailed output during training.
+
+         Returns
+         -------
+         self : BaseClusterer
+             Returns the instance of the class that implements this method.
+         """
+
+     @abstractmethod
+     def predict(self, X: npt.NDArray) -> Optional[npt.NDArray]:
+         """
+         Generate predictions based on the input data X.
+
+         This abstract method is implemented by the class that inherits it.
+
+         Parameters
+         ----------
+         X : npt.NDArray
+             Input data for which predictions will be generated.
+
+         Returns
+         -------
+         predictions : Optional[npt.NDArray]
+             Predicted cluster labels for each input sample, or None if prediction is not possible.
+         """
+
+     def fit_predict(self, X, verbose: bool = True):
+         """Fit the clustering model to the data and return cluster labels.
+
+         This is a convenience method that combines `fit` and `predict`
+         into a single call.
+
+         Parameters
+         ----------
+         X : npt.NDArray
+             Input data for which predictions will be generated.
+         verbose : bool, default=True
+             Flag to enable or disable detailed output during training.
+
+         Returns
+         -------
+         predictions : Optional[npt.NDArray]
+             Predicted cluster labels for each input sample, or None if prediction is not possible.
+         """
+         self.fit(X, verbose)
+         return self.predict(X)
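
For context, a minimal sketch of a concrete subclass (hypothetical, not part of the package) showing how `fit_predict` composes the two abstract methods:

    from typing import Optional

    import numpy as np
    import numpy.typing as npt

    from aisp.base import BaseClusterer

    class NaiveTwoMeans(BaseClusterer):
        """Toy clusterer: assigns each sample to the nearer of two fixed centers."""

        def __init__(self):
            self.centers: Optional[npt.NDArray] = None

        def fit(self, X: npt.NDArray, verbose: bool = True) -> "NaiveTwoMeans":
            # Crude "training": take the feature-wise min and max rows as centers.
            self.centers = np.vstack([X.min(axis=0), X.max(axis=0)])
            return self

        def predict(self, X: npt.NDArray) -> Optional[npt.NDArray]:
            if self.centers is None:
                return None
            # Label = index of the nearest center.
            dists = np.linalg.norm(X[:, None, :] - self.centers[None], axis=2)
            return dists.argmin(axis=1)

    labels = NaiveTwoMeans().fit_predict(np.random.rand(10, 2))
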
aisp/base/mutation.py CHANGED
@@ -84,3 +84,47 @@ def clone_and_mutate_binary(
          clone_set[i] = clone

      return clone_set
+
+
+ @njit([(types.float64[:], types.int64, types.float64[:, :])], cache=True)
+ def clone_and_mutate_ranged(
+     vector: npt.NDArray[np.float64],
+     n: int,
+     bounds: npt.NDArray[np.float64]
+ ) -> npt.NDArray[np.float64]:
+     """
+     Generate a set of mutated clones from a cell whose features lie within custom ranges per dimension.
+
+     This function creates `n` clones of the input vector and applies random mutations to each of
+     them, simulating the process of clonal expansion in artificial immune systems. Each clone
+     will have a random number of mutations applied in distinct positions of the original vector.
+
+     Parameters
+     ----------
+     vector : npt.NDArray[np.float64]
+         The original immune cell with real values to be cloned and mutated.
+     n : int
+         The number of mutated clones to be generated.
+     bounds : np.ndarray
+         Array (n_features, 2) with min and max per dimension.
+
+     Returns
+     -------
+     clone_set : npt.NDArray
+         An array (n, len(vector)) containing the `n` mutated clones of the original vector.
+     """
+     n_features = vector.shape[0]
+     clone_set = np.empty((n, n_features), dtype=np.float64)
+
+     for i in range(n):
+         n_mutations = np.random.randint(1, n_features)
+         clone = vector.copy()
+         position_mutations = np.random.permutation(n_features)[:n_mutations]
+         for j in range(n_mutations):
+             idx = position_mutations[j]
+             min_limit = bounds[idx, 0]
+             max_limit = bounds[idx, 1]
+             clone[idx] = np.random.uniform(min_limit, max_limit)
+         clone_set[i] = clone
+
+     return clone_set
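
A usage sketch for the new ranged mutation (values are illustrative; note the Numba signature requires float64 inputs and `bounds` shaped `(n_features, 2)`):

    import numpy as np
    from aisp.base.mutation import clone_and_mutate_ranged

    vector = np.array([1.5, -0.3, 7.0])     # float64, shape (n_features,)
    bounds = np.array([[0.0, 2.0],          # per-dimension [min, max]
                       [-1.0, 1.0],
                       [5.0, 10.0]])        # shape (n_features, 2)
    clones = clone_and_mutate_ranged(vector, 5, bounds)
    assert clones.shape == (5, 3)           # each row is a mutated clone
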
aisp/csa/__init__.py CHANGED
@@ -3,7 +3,7 @@
  CSAs are inspired by the process of antibody proliferation upon detecting an antigen, during which
  the generated antibodies undergo mutations in an attempt to enhance pathogen recognition.
  """
- from ._ai_immune_recognition_sys import AIRS
+ from ._ai_recognition_sys import AIRS

  __author__ = 'João Paulo da Silva Barros'
  __all__ = ['AIRS']
aisp/csa/_ai_immune_recognition_sys.py → aisp/csa/_ai_recognition_sys.py RENAMED
@@ -4,16 +4,19 @@ import random
  from collections import Counter
  from heapq import nlargest
  from operator import attrgetter
- from typing import List, Literal, Optional, Dict
+ from typing import List, Optional, Dict

  import numpy as np
  import numpy.typing as npt
  from scipy.spatial.distance import pdist
  from tqdm import tqdm

+ from ..base import set_seed_numba
  from ._cell import Cell
  from ..utils.sanitizers import sanitize_param, sanitize_seed, sanitize_choice
  from ..utils.distance import hamming, compute_metric_distance, get_metric_code
+ from ..utils.types import FeatureType, MetricType
+ from ..utils.validation import detect_vector_data_type
  from ._base import BaseAIRS


@@ -114,20 +117,12 @@ class AIRS(BaseAIRS):
          * ``'manhattan'`` ➜ The calculation of the distance is given by the expression:
            ( |x₁ – x₂| + |y₁ – y₂| + ... + |yn – yn|).

-     algorithm : Literal["continuous-features", "binary-features"], default="continuous-features"
-         Specifies the type of algorithm to use based on the nature of the input features:
-
-         * ``continuous-features``: selects an algorithm designed for continuous data, which should
-           be normalized within the range [0, 1].
-
-         * ``binary-features``: selects an algorithm specialized for handling binary variables.
-
      seed : int
          Seed for the random generation of detector values. Defaults to None.

      **kwargs
          p : float
-             This parameter stores the value of ``p`` used in the Minkowsks distance. The default
+             This parameter stores the value of ``p`` used in the Minkowski distance. The default
              is ``2``, which represents normalized Euclidean distance.\
              Different values of p lead to different variants of the Minkowski Distance.

@@ -160,11 +155,8 @@ class AIRS(BaseAIRS):
          k: int = 3,
          max_iters: int = 100,
          resource_amplified: float = 1.0,
-         metric: Literal["manhattan", "minkowski", "euclidean"] = "euclidean",
-         algorithm: Literal[
-             "continuous-features", "binary-features"
-         ] = "continuous-features",
-         seed: int = None,
+         metric: MetricType = "euclidean",
+         seed: Optional[int] = None,
          **kwargs,
      ) -> None:
          self.n_resources: float = sanitize_param(n_resources, 10, lambda x: x >= 1)
@@ -183,35 +175,30 @@ class AIRS(BaseAIRS):
          )
          self.k: int = sanitize_param(k, 3, lambda x: x > 3)
          self.max_iters: int = sanitize_param(max_iters, 100, lambda x: x > 0)
-         self.seed: int = sanitize_seed(seed)
+         self.seed: Optional[int] = sanitize_seed(seed)
          if self.seed is not None:
              np.random.seed(self.seed)
+             set_seed_numba(self.seed)

-         self.algorithm: Literal["continuous-features", "binary-features"] = (
-             sanitize_param(
-                 algorithm, "continuous-features", lambda x: x == "binary-features"
-             )
-         )
+         self._feature_type: FeatureType = "continuous-features"

-         if algorithm == "binary-features":
-             self.metric: str = "hamming"
-         else:
-             self.metric: str = sanitize_choice(
-                 metric, ["manhattan", "minkowski"], "euclidean"
-             )
+         self.metric = sanitize_choice(
+             metric, ["manhattan", "minkowski"], "euclidean"
+         )

          self.p: np.float64 = np.float64(kwargs.get("p", 2.0))

          self._cells_memory = None
          self.affinity_threshold = 0.0
-         self.classes = None
+         self.classes = []
+         self._bounds: Optional[npt.NDArray[np.float64]] = None

      @property
-     def cells_memory(self) -> Dict[str, list[Cell]]:
+     def cells_memory(self) -> Optional[Dict[str, list[Cell]]]:
          """Returns the trained cells memory, organized by class."""
          return self._cells_memory

-     def fit(self, X: npt.NDArray, y: npt.NDArray, verbose: bool = True):
+     def fit(self, X: npt.NDArray, y: npt.NDArray, verbose: bool = True) -> "AIRS":
          """
          Fit the model to the training data using the AIRS.

@@ -233,27 +220,30 @@ class AIRS(BaseAIRS):
          AIRS
              Returns the instance itself.
          """
-         progress = None
+         self._feature_type = detect_vector_data_type(X)

-         super()._check_and_raise_exceptions_fit(X, y, self.algorithm)
+         super()._check_and_raise_exceptions_fit(X, y)

-         if self.algorithm == "binary-features":
-             X = X.astype(np.bool_)
+         match self._feature_type:
+             case "binary-features":
+                 X = X.astype(np.bool_)
+                 self.metric = "hamming"
+             case "ranged-features":
+                 self._bounds = np.vstack([np.min(X, axis=0), np.max(X, axis=0)])

          self.classes = np.unique(y)
          sample_index = self._slice_index_list_by_class(y)
-         if verbose:
-             progress = tqdm(
-                 total=len(y),
-                 postfix="\n",
-                 bar_format="{desc} ┇{bar}┇ {n}/{total} memory cells for each aᵢ",
-             )
+         progress = tqdm(
+             total=len(y),
+             postfix="\n",
+             disable=not verbose,
+             bar_format="{desc} ┇{bar}┇ {n}/{total} memory cells for each aᵢ",
+         )
          pool_cells_classes = {}
          for _class_ in self.classes:
-             if verbose:
-                 progress.set_description_str(
-                     f"Generating the memory cells for the {_class_} class:"
-                 )
+             progress.set_description_str(
+                 f"Generating the memory cells for the {_class_} class:"
+             )

              x_class = X[sample_index[_class_]]
              # Calculating the similarity threshold between antigens
@@ -267,7 +257,7 @@ class AIRS(BaseAIRS):
              for ai in x_class:
                  # Calculating the stimulation of memory cells with aᵢ and selecting the largest
                  # stimulation from the memory set.
-                 c_match = None
+                 c_match = pool_c[0]
                  match_stimulation = -1
                  for cell in pool_c:
                      stimulation = self._affinity(cell.vector, ai)
@@ -284,7 +274,7 @@ class AIRS(BaseAIRS):

                  set_clones: npt.NDArray = c_match.hyper_clonal_mutate(
                      int(self.rate_hypermutation * self.rate_clonal * match_stimulation),
-                     self.algorithm
+                     self._feature_type
                  )

                  for clone in set_clones:
@@ -302,15 +292,14 @@ class AIRS(BaseAIRS):
                  if self._affinity(c_candidate.vector, c_match.vector) < sufficiently_similar:
                      pool_c.remove(c_match)

-             if verbose:
-                 progress.update(1)
+             progress.update(1)
              pool_cells_classes[_class_] = pool_c

-         if verbose:
-             progress.set_description(
-                 f"\033[92m✔ Set of memory cells for classes ({', '.join(map(str, self.classes))}) "
-                 f"successfully generated\033[0m"
-             )
+         progress.set_description(
+             f"\033[92m✔ Set of memory cells for classes ({', '.join(map(str, self.classes))}) "
+             f"successfully generated\033[0m"
+         )
+         progress.close()
          self._cells_memory = pool_cells_classes
          return self

@@ -337,7 +326,7 @@ class AIRS(BaseAIRS):
              return None

          super()._check_and_raise_exceptions_predict(
-             X, len(self._cells_memory[self.classes[0]][0].vector), self.algorithm
+             X, len(self._cells_memory[self.classes[0]][0].vector), self._feature_type
          )

          c: list = []
@@ -417,7 +406,7 @@ class AIRS(BaseAIRS):
              random_index = random.randint(0, len(arb_list) - 1)
              clone_arb = arb_list[random_index].hyper_clonal_mutate(
                  int(self.rate_clonal * c_match_stimulation),
-                 self.algorithm
+                 self._feature_type
              )

              arb_list = [
@@ -446,12 +435,12 @@ class AIRS(BaseAIRS):
          antigens_list : npt.NDArray
              List of training antigens.
          """
-         if self.algorithm == "binary-features":
+         if self._feature_type == "binary-features":
              distances = pdist(antigens_list, metric="hamming")
-         elif self.metric == "minkowski":
-             distances = pdist(antigens_list, metric="minkowski", p=self.p)
          else:
-             distances = pdist(antigens_list, metric=self.metric)
+             metric_kwargs = {'p': self.p} if self.metric == 'minkowski' else {}
+             distances = pdist(antigens_list, metric=self.metric, **metric_kwargs)
+
          n = antigens_list.shape[0]
          sum_affinity = np.sum(1.0 - (distances / (1.0 + distances)))
          self.affinity_threshold = 1.0 - (sum_affinity / ((n * (n - 1)) / 2))
@@ -473,7 +462,7 @@ class AIRS(BaseAIRS):
              The stimulus rate between the vectors.
          """
          distance: float
-         if self.algorithm == "binary-features":
+         if self._feature_type == "binary-features":
              distance = hamming(u, v)
          else:
              distance = compute_metric_distance(
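
Migration note: the `algorithm` constructor argument is gone; `fit` now infers the feature type from `X` via `detect_vector_data_type`. A sketch of the 0.3.0 call pattern (data shapes illustrative, and assuming floats in [0, 1] are detected as continuous features, as the old docstring implied):

    import numpy as np
    from aisp.csa import AIRS

    X = np.random.rand(50, 10)       # continuous features in [0, 1]
    y = np.random.randint(0, 2, 50)

    # 0.2.x: AIRS(algorithm="continuous-features", seed=1)
    # 0.3.0: the feature type is detected from X inside fit
    model = AIRS(seed=1).fit(X, y, verbose=False)
    predictions = model.predict(X)
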
aisp/csa/_base.py CHANGED
@@ -1,12 +1,12 @@
  """Base Class for Clonal Selection Algorithm."""

  from abc import ABC
- from typing import Literal

  import numpy as np
  import numpy.typing as npt

- from aisp.exceptions import FeatureDimensionMismatch
+ from ..exceptions import FeatureDimensionMismatch
+ from ..utils.types import FeatureType
  from ..base import BaseClassifier


@@ -20,11 +20,8 @@ class BaseAIRS(BaseClassifier, ABC):

      @staticmethod
      def _check_and_raise_exceptions_fit(
-         X: npt.NDArray = None,
-         y: npt.NDArray = None,
-         algorithm: Literal[
-             "continuous-features", "binary-features"
-         ] = "continuous-features"
+         X: npt.NDArray,
+         y: npt.NDArray
      ):
          """
          Verify the fit parameters and throw exceptions if the verification is not successful.
@@ -36,17 +33,11 @@ class BaseAIRS(BaseClassifier, ABC):
              [``N samples`` (rows)][``N features`` (columns)].
          y : npt.NDArray
              Array of target classes of ``X`` with [``N samples`` (lines)].
-         algorithm : Literal["continuous-features", "binary-features"], default="continuous-features"
-             Specifies the type of algorithm to use, depending on whether the input data has
-             continuous or binary features.

          Raises
          ------
          TypeError:
              If X or y are not ndarrays or have incompatible shapes.
-         ValueError
-             If algorithm is binary-features and X contains values that are not composed only
-             of 0 and 1.
          """
          if not isinstance(X, np.ndarray):
              if isinstance(X, list):
@@ -63,18 +54,12 @@ class BaseAIRS(BaseClassifier, ABC):
                  "X does not have the same amount of sample for the output classes in y."
              )

-         if algorithm == "binary-features" and not np.isin(X, [0, 1]).all():
-             raise ValueError(
-                 "The array X contains values that are not composed only of 0 and 1."
-             )

      @staticmethod
      def _check_and_raise_exceptions_predict(
-         X: npt.NDArray = None,
+         X: npt.NDArray,
          expected: int = 0,
-         algorithm: Literal[
-             "continuous-features", "binary-features"
-         ] = "continuous-features"
+         feature_type: FeatureType = "continuous-features"
      ) -> None:
          """
          Verify the predict parameters and throw exceptions if the verification is not successful.
@@ -86,8 +71,8 @@ class BaseAIRS(BaseClassifier, ABC):
              [``N samples`` (rows)][``N features`` (columns)].
          expected : int, default=0
              Expected number of features per sample (columns in X).
-         algorithm : Literal["continuous-features", "binary-features"], default="continuous-features"
-             Specifies the type of algorithm to use, depending on whether the input data has
+         feature_type : FeatureType, default="continuous-features"
+             Specifies the type of features, depending on whether the input data has
              continuous or binary features.

          Raises
@@ -97,7 +82,7 @@ class BaseAIRS(BaseClassifier, ABC):
          FeatureDimensionMismatch
              If the number of features in X does not match the expected number.
          ValueError
-             If algorithm is binary-features and X contains values that are not composed only
+             If feature_type is binary-features and X contains values that are not composed only
              of 0 and 1.
          """
          if not isinstance(X, (np.ndarray, list)):
@@ -109,7 +94,7 @@ class BaseAIRS(BaseClassifier, ABC):
                  "X"
              )

-         if algorithm != "binary-features":
+         if feature_type != "binary-features":
              return

          # Checks if matrix X contains only binary samples. Otherwise, raises an exception.
aisp/csa/_cell.py CHANGED
@@ -1,12 +1,17 @@
  """Represents a memory B-cell."""

  from dataclasses import dataclass
- from typing import Literal
+ from typing import Optional

  import numpy as np
  import numpy.typing as npt

- from ..base.mutation import clone_and_mutate_continuous, clone_and_mutate_binary
+ from ..base.mutation import (
+     clone_and_mutate_continuous,
+     clone_and_mutate_binary,
+     clone_and_mutate_ranged
+ )
+ from ..utils.types import FeatureType


  @dataclass(slots=True)
@@ -25,7 +30,8 @@ class Cell:
      def hyper_clonal_mutate(
          self,
          n: int,
-         algorithm: Literal["continuous-features", "binary-features"] = "continuous-features"
+         feature_type: FeatureType = "continuous-features",
+         bounds: Optional[npt.NDArray[np.float64]] = None
      ) -> npt.NDArray:
          """
          Clones N features from a cell's features, generating a set of mutated vectors.

@@ -34,14 +40,22 @@ class Cell:
          ----------
          n : int
              Number of clones to be generated from mutations of the original cell.
-         algorithm : Literal["continuous-features", "binary-features"], default="continuous-features"
-             Specifies the type of algorithm to use based on the nature of the input features
+         feature_type : Literal["binary-features", "continuous-features", "ranged-features"]
+             Specifies the type of mutation to use, based on the nature of the input features.
+         bounds : np.ndarray
+             Array (n_features, 2) with min and max per dimension.

          Returns
          -------
          npt.NDArray
              An array containing N mutated vectors from the original cell.
          """
-         if algorithm == "binary-features":
+         if feature_type == "binary-features":
              return clone_and_mutate_binary(self.vector, n)
+         if feature_type == "ranged-features" and bounds is not None:
+             return clone_and_mutate_ranged(self.vector, n, bounds)
          return clone_and_mutate_continuous(self.vector, n)
+
+     def __eq__(self, other):
+         """Check if two cells are equal."""
+         return np.array_equal(self.vector, other.vector)
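
The explicit `__eq__` matters because the dataclass-generated equality would compare the `vector` arrays with `==`, whose element-wise result has no single truth value and raises `ValueError`; comparing via `np.array_equal` avoids that. A small sketch (assuming `vector` is the cell's dataclass field, as it is used throughout this diff):

    import numpy as np
    from aisp.csa._cell import Cell  # internal module path as shown above

    a = Cell(vector=np.array([0.1, 0.2]))
    b = Cell(vector=np.array([0.1, 0.2]))
    assert a == b  # compares the underlying vectors element-wise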