pyerualjetwork-5.1-py3-none-any.whl → pyerualjetwork-5.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +15 -14
- pyerualjetwork/cpu/__init__.py +24 -0
- pyerualjetwork/{activation_functions_cpu.py → cpu/activation_functions.py} +40 -4
- pyerualjetwork/{data_operations_cpu.py → cpu/data_ops.py} +17 -19
- pyerualjetwork/{metrics_cpu.py → cpu/metrics.py} +3 -1
- pyerualjetwork/{visualizations_cpu.py → cpu/visualizations.py} +96 -139
- pyerualjetwork/cuda/__init__.py +24 -0
- pyerualjetwork/{activation_functions_cuda.py → cuda/activation_functions.py} +54 -5
- pyerualjetwork/{data_operations_cuda.py → cuda/data_ops.py} +16 -16
- pyerualjetwork/{metrics_cuda.py → cuda/metrics.py} +1 -1
- pyerualjetwork/{visualizations_cuda.py → cuda/visualizations.py} +8 -244
- pyerualjetwork/{ene_cpu.py → ene.py} +29 -95
- pyerualjetwork/fitness_functions.py +0 -1
- pyerualjetwork/help.py +5 -5
- pyerualjetwork/issue_solver.py +39 -11
- pyerualjetwork/{memory_operations.py → memory_ops.py} +1 -1
- pyerualjetwork/model_ops.py +734 -0
- pyerualjetwork/{neu_cpu.py → nn.py} +199 -91
- pyerualjetwork/{model_operations_cpu.py → old_cpu_model_ops.py} +62 -59
- pyerualjetwork/{model_operations_cuda.py → old_cuda_model_ops.py} +99 -86
- {pyerualjetwork-5.1.dist-info → pyerualjetwork-5.5.dist-info}/METADATA +16 -18
- pyerualjetwork-5.5.dist-info/RECORD +27 -0
- pyerualjetwork/ene_cuda.py +0 -962
- pyerualjetwork/neu_cuda.py +0 -588
- pyerualjetwork-5.1.dist-info/RECORD +0 -26
- /pyerualjetwork/{loss_functions_cpu.py → cpu/loss_functions.py} +0 -0
- /pyerualjetwork/{loss_functions_cuda.py → cuda/loss_functions.py} +0 -0
- {pyerualjetwork-5.1.dist-info → pyerualjetwork-5.5.dist-info}/WHEEL +0 -0
- {pyerualjetwork-5.1.dist-info → pyerualjetwork-5.5.dist-info}/top_level.txt +0 -0
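The renames above suggest that the device-suffixed flat modules of 5.1 were reorganized into `cpu/` and `cuda/` subpackages, with the CPU/CUDA pairs (`neu_*`, `ene_*`, `model_operations_*`) unified into single top-level modules (`nn.py`, `ene.py`, `model_ops.py`). The snippet below is a minimal, hypothetical before/after import sketch based only on those file renames; it assumes the moved modules keep their public function names, which should be verified against the actual 5.5 package.

```python
# Hypothetical import migration sketch (assumption: public names are unchanged).

# 5.1 layout (device-suffixed flat modules):
# from pyerualjetwork.activation_functions_cpu import apply_activation
# from pyerualjetwork.data_operations_cuda import normalization
# from pyerualjetwork import neu_cpu, ene_cuda, model_operations_cpu

# 5.5 layout (cpu/ and cuda/ subpackages, unified top-level modules):
from pyerualjetwork.cpu.activation_functions import apply_activation  # assumed path
from pyerualjetwork.cuda.data_ops import normalization                # assumed path
from pyerualjetwork import nn, ene, model_ops                         # new unified modules
```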
pyerualjetwork/ene_cuda.py
DELETED
@@ -1,962 +0,0 @@
"""


ENE (Eugenic NeuroEvolution) on CUDA
====================================

This module contains all the functions necessary for implementing and testing the ENE (Eugenic NeuroEvolution) algorithm on CUDA GPU.
For more information about the ENE algorithm: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf

Module functions:
-----------------
- evolver()
- define_genomes()
- evaluate()
- cross_over()
- mutation()
- dominant_parent_selection()
- second_parent_selection()

Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes

PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf

- Author: Hasan Can Beydili
- YouTube: https://www.youtube.com/@HasanCanBeydili
- Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
- Instagram: https://www.instagram.com/canbeydilj
- Contact: tchasancan@gmail.com
"""

import cupy as cp
import numpy as np
import random
import math
import copy

### LIBRARY IMPORTS ###
from .data_operations_cuda import normalization, non_neg_normalization
from .ui import loading_bars, initialize_loading_bar
from .activation_functions_cuda import apply_activation, all_activations

def define_genomes(input_shape, output_shape, population_size, neurons=[], activation_functions=[], dtype=cp.float32):
    """
    Initializes a population of genomes, where each genome is represented by a set of weights
    and an associated activation function. Each genome is created with random weights and activation
    functions are applied and normalized. (Max abs normalization.)

    Args:

        input_shape (int): The number of input features for the neural network.

        output_shape (int): The number of output features for the neural network.

        population_size (int): The number of genomes (individuals) in the population.

        neurons (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Default: None (PLAN)

        activation_functions (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. Default: None (PLAN) NOTE: THIS EFFECTS HIDDEN LAYERS OUTPUT. NOT OUTPUT LAYER!

        dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16.

    Returns:
        tuple: A tuple containing:
            - population_weights (list[cupy.ndarray]): representing the
              weight matrices for each genome.
            - population_activations (list): A list of activation functions applied to each genome.

    Notes:
        The weights are initialized randomly within the range [-1, 1].
        Activation functions are selected randomly from a predefined list `all_activations()`.
        The weights for each genome are then modified by applying the corresponding activation function
        and normalized using the `normalization()` function. (Max abs normalization.)
    """

    hidden = len(neurons)

    if hidden > 0:
        population_weights = [[0] * (hidden + 1) for _ in range(population_size)]
        population_activations = [[0] * (hidden) for _ in range(population_size)]

        if len(neurons) != hidden:
            raise ValueError('hidden parameter and neurons list length must be equal.')


        for i in range(len(population_weights)):

            for l in range(hidden + 1):

                if l == 0:
                    population_weights[i][l] = cp.random.uniform(-1, 1, (neurons[l], input_shape)).astype(dtype)

                elif l == hidden:
                    population_weights[i][l] = cp.random.uniform(-1, 1, (output_shape, neurons[l-1])).astype(dtype)

                else:
                    population_weights[i][l] = cp.random.uniform(-1, 1, (neurons[l], neurons[l-1])).astype(dtype)

                if l != hidden:
                    population_activations[i][l] = activation_functions[l]

                # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):

                for j in range(population_weights[i][l].shape[0]):

                    population_weights[i][l][j,:] = apply_activation(population_weights[i][l][j,:], population_activations[i])
                    population_weights[i][l][j,:] = normalization(population_weights[i][l][j,:], dtype=dtype)

        return population_weights, population_activations

    else:
        population_weights = [0] * population_size
        population_activations = [0] * population_size

        except_this = ['spiral', 'circular']
        activations = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED

        for i in range(len(population_weights)):

            population_weights[i] = cp.random.uniform(-1, 1, (output_shape, input_shape)).astype(dtype, copy=False)
            population_activations[i] = activations[int(random.uniform(0, len(activations)-1))]

            # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):

            for j in range(population_weights[i].shape[0]):

                population_weights[i][j,:] = apply_activation(population_weights[i][j,:], population_activations[i])
                population_weights[i][j,:] = normalization(population_weights[i][j,:], dtype=dtype)

        return cp.array(population_weights, dtype=dtype), population_activations

def evolver(weights,
            activations,
            what_gen,
            fitness,
            weight_evolve=True,
            show_info=False,
            policy='aggressive',
            bad_genomes_selection_prob=None,
            bar_status=True,
            strategy='normal_selective',
            bad_genomes_mutation_prob=None,
            fitness_bias=1,
            cross_over_mode='tpm',
            activation_mutate_add_prob=0.5,
            activation_mutate_delete_prob=0.5,
            activation_mutate_change_prob=0.5,
            activation_selection_add_prob=0.5,
            activation_selection_change_prob=0.5,
            activation_selection_threshold=20,
            activation_mutate_prob=0.5,
            activation_mutate_threshold=20,
            weight_mutate_threshold=16,
            weight_mutate_prob=1,
            is_mlp=False,
            save_best_genome=False,
            dtype=cp.float32):
    """
    Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
    The function modifies the population's weights and activation functions based on a specified policy, mutation probabilities, and strategy.

    'selection' args effects cross-over.
    'mutate' args effects mutation.

    Args:
        weights (cupy.ndarray): Array of weights for each genome.
            (first returned value of define_genomes function)

        activations (list[str]): A list of activation functions for each genome.
            (second returned value of define_genomes function) NOTE!: 'activation potentiations' for PLAN 'activation functions' for MLP.

        what_gen (int): The current generation number, used for informational purposes or logging.

        fitness (cupy.ndarray): A 1D array containing the fitness values of each genome.
            The array is used to rank the genomes based on their performance. PLANEAT maximizes or minimizes this fitness based on the `target_fitness` parameter.

        weight_evolve (bool, optional): Are weights to be evolves or just activation combinations Default: True. Note: Regardless of whether this parameter is True or False, you must give the evolver function a list of weights equal to the number of activation potentiations. You can create completely random weights if you want. If this parameter is False, the weights entering the evolver function and the resulting weights will be exactly the same.

        show_info (bool, optional): If True, prints information about the current generation and the
            maximum reward obtained. Also shows the current configuration. Default is False.

        strategy (str, optional): The strategy for combining the best and bad genomes. Options:
            - 'normal_selective': Normal selection based on reward, where a portion of the bad genes are discarded.
            - 'more_selective': A more selective strategy, where fewer bad genes survive.
            - 'less_selective': A less selective strategy, where more bad genes survive.
            Default is 'normal_selective'.

        bar_status (bool, optional): Loading bar status during evolving process of genomes. True or False. Default: True

        policy (str, optional): The selection policy that governs how genomes are selected for reproduction. Options:

            - 'aggressive': Aggressive policy using very aggressive selection policy.
                Advantages: fast training.
                Disadvantages: may lead to fitness stuck in a local maximum or minimum.

            - 'explorer': Explorer policy increases population diversity.
                Advantages: fitness does not get stuck at local maximum or minimum.
                Disadvantages: slow training.

            Suggestions: Use hybrid and dynamic policy. When fitness appears stuck, switch to the 'explorer' policy.

            Default: 'aggressive'.

        fitness_bias (float, optional): Fitness bias must be a probability value between 0 and 1 that determines the effect of fitness on the crossover process. Default: 1`.

        bad_genomes_mutation_prob (float, optional): The probability of applying mutation to the bad genomes.
            Must be in the range [0, 1]. Also affects the mutation probability of the best genomes inversely.
            For example, a value of 0.7 for bad genomes implies 0.3 for best genomes. Default: Determined by `policy`.

        bad_genomes_selection_prob (float, optional): The probability of crossover parents are bad genomes ? [0-1] Default: Determined by `policy`.

        activation_mutate_prob (float, optional): The probability of applying mutation to the activation functions.
            Must be in the range [0, 1]. Default is 0.5 (50%).

        cross_over_mode (str, optional): Specifies the crossover method to use. Options:
            - 'tpm': Two-Point Matrix Crossover.
            Default is 'tpm'.

        activation_mutate_add_prob (float, optional): The probability of adding a new activation function to the genome for mutation.
            Must be in the range [0, 1]. Default is 0.5.

        activation_mutate_delete_prob (float, optional): The probability of deleting an existing activation function
            from the genome for mutation. Must be in the range [0, 1]. Default is 0.5.

        activation_mutate_change_prob (float, optional): The probability of changing an activation function in the genome for mutation.
            Must be in the range [0, 1]. Default is 0.5.

        weight_mutate_prob (float, optional): The probability of mutating a weight in the genome.
            Must be in the range [0, 1]. Default is 1 (%100).

        weight_mutate_threshold (int): Determines max how much weight mutaiton operation applying. (Function automaticly determines to min) Default: 16

        activation_selection_add_prob (float, optional): The probability of adding an existing activation function for crossover.
            Must be in the range [0, 1]. Default is 0.5. (WARNING! Higher values increase complexity. For faster training, increase this value.)

        activation_selection_change_prob (float, optional): The probability of changing an activation function in the genome for crossover.
            Must be in the range [0, 1]. Default is 0.5.

        activation_mutate_threshold (int): Determines max how much activation mutaiton operation applying. (Function automaticly determines to min) Default: 20

        activation_selection_threshold (int, optional): Determines max how much activaton transferable to child from undominant parent. (Function automaticly determines to min) Default: 20

        is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)

        save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. Default: False

        dtype (cupy.dtype): Data type for the arrays. Default: cp.float32.
            Example: cp.float64 or cp.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].

    Raises:
        ValueError:
            - If `policy` is not one of the specified values ('aggressive', 'explorer').
            - If 'strategy' is not one of the specified values ('less_selective', 'normal_selective', 'more_selective')
            - If `cross_over_mode` is not one of the specified values ('tpm').
            - If `bad_genomes_mutation_prob`, `activation_mutate_prob`, or other probability parameters are not in the range 0 and 1.
            - If the population size is odd (ensuring an even number of genomes is required for proper selection).
            - If 'fitness_bias' value is not in range 0 and 1.

    Returns:
        tuple: A tuple containing:
            - weights (numpy.ndarray): The updated weights for the population after selection, crossover, and mutation.
              The shape is (population_size, output_shape, input_shape).
            - activations (list): The updated list of activation functions for the population.

    Notes:
        - **Selection Process**:
            - The genomes are sorted by their fitness (based on `fitness`), and then split into "best" and "bad" halves.
            - The best genomes are retained, and the bad genomes are modified based on the selected strategy.

        - **Crossover Strategies**:
            - The **'cross_over'** strategy performs crossover, where parts of the best genomes' weights are combined with other good genomes to create new weight matrices.

        - **Mutation**:
            - Mutation is applied to both the best and bad genomes, depending on the mutation probability and the `policy`.
            - `bad_genomes_mutation_prob` determines the probability of applying mutations to the bad genomes.
            - If `activation_mutate_prob` is provided, activation function mutations are applied to the genomes based on this probability.

        - **Population Size**: The population size must be an even number to properly split the best and bad genomes. If `fitness` has an odd length, an error is raised.

        - **Logging**: If `show_info=True`, the current generation and the maximum reward from the population are printed for tracking the learning progress.

    Example:
        ```python
        weights, activations = ene_cuda.evolver(weights, activations, 1, fitness, show_info=True, strategy='normal_selective', policy='aggressive')
        ```

    - The function returns the updated weights and activations after processing based on the chosen strategy, policy, and mutation parameters.
    """

    ### ERROR AND CONFIGURATION CHECKS:
    if strategy == 'normal_selective':
        if bad_genomes_mutation_prob is None: bad_genomes_mutation_prob = 0.7 # EFFECTS MUTATION
        if bad_genomes_selection_prob is None: bad_genomes_selection_prob = 0.25 # EFFECTS CROSS-OVER

    elif strategy == 'more_selective':
        if bad_genomes_mutation_prob is None: bad_genomes_mutation_prob = 0.85 # EFFECTS MUTATION
        if bad_genomes_selection_prob is None: bad_genomes_selection_prob = 0.1 # EFFECTS CROSS-OVER

    elif strategy == 'less_selective':
        if bad_genomes_mutation_prob is None: bad_genomes_mutation_prob = 0.6 # EFFECTS MUTATION
        if bad_genomes_selection_prob is None: bad_genomes_selection_prob = 0.5 # EFFECTS CROSS-OVER

    else:
        raise ValueError("strategy parameter must be: 'normal_selective' or 'more_selective' or 'less_selective'")

    if ((activation_mutate_add_prob < 0 or activation_mutate_add_prob > 1) or
        (activation_mutate_change_prob < 0 or activation_mutate_change_prob > 1) or
        (activation_mutate_delete_prob < 0 or activation_mutate_delete_prob > 1) or
        (weight_mutate_prob < 0 or weight_mutate_prob > 1) or
        (activation_selection_add_prob < 0 or activation_selection_add_prob > 1) or (
        activation_selection_change_prob < 0 or activation_selection_change_prob > 1)):

        raise ValueError("All hyperparameters ending with 'prob' must be a number between 0 and 1.")

    if fitness_bias < 0 or fitness_bias > 1: raise ValueError("fitness_bias value must be a number between 0 and 1.")

    if bad_genomes_mutation_prob is not None:
        if bad_genomes_mutation_prob < 0 or bad_genomes_mutation_prob > 1:
            raise ValueError("bad_genomes_mutation_prob parameter must be float and 0-1 range")

    if activation_mutate_prob is not None:
        if activation_mutate_prob < 0 or activation_mutate_prob > 1:
            raise ValueError("activation_mutate_prob parameter must be float and 0-1 range")

    if len(fitness) % 2 == 0:
        slice_center = int(len(fitness) / 2)

    else:
        raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")

    if weight_evolve is False: origin_weights = cp.copy(weights) if is_mlp else copy.deepcopy(weights)

    if is_mlp:
        activation_mutate_add_prob = 0
        activation_selection_add_prob = 0
        activation_mutate_delete_prob = 0

        if isinstance(weights, list):

            for i in range(len(weights)):

                for j in range(len(weights[i])):

                    weights[i][j] = weights[i][j].get()

            weights = np.array(weights, dtype=object)

    ### FITNESS LIST IS SORTED IN ASCENDING ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:

    previous_best_genome_index = np.argmax(fitness)

    sort_indices = cp.argsort(fitness)

    fitness = fitness[sort_indices]
    weights = weights[sort_indices]

    activations = [activations[int(i)] for i in sort_indices]

    ### GENOMES ARE DIVIDED INTO TWO GROUPS: GOOD GENOMES AND BAD GENOMES:

    good_weights = weights[slice_center:]
    bad_weights = weights[:slice_center]
    best_weight = cp.copy(good_weights[-1])

    good_activations = list(activations[slice_center:])
    bad_activations = list(activations[:slice_center])
    best_activations = good_activations[-1].copy() if isinstance(good_activations[-1], list) else good_activations[-1]


    ### PLANEAT IS APPLIED ACCORDING TO THE SPECIFIED POLICY, STRATEGY, AND PROBABILITY CONFIGURATION:

    bar_format = loading_bars()[0]

    if bar_status: progress = initialize_loading_bar(len(bad_weights), desc="GENERATION: " + str(what_gen), bar_format=bar_format, ncols=50)
    normalized_fitness = non_neg_normalization(fitness, dtype=dtype)

    best_fitness = normalized_fitness[-1]
    epsilon = cp.finfo(float).eps

    child_W = cp.copy(bad_weights)
    child_act = bad_activations.copy()

    mutated_W = cp.copy(bad_weights)
    mutated_act = bad_activations.copy()


    for i in range(len(bad_weights)):

        if policy == 'aggressive':
            first_parent_W = best_weight
            first_parent_act = best_activations
            first_parent_fitness = best_fitness

        elif policy == 'explorer':
            first_parent_W = good_weights[i]
            first_parent_act = good_activations[i]
            first_parent_fitness = normalized_fitness[len(good_weights) + i]

        else: raise ValueError("policy parameter must be: 'aggressive' or 'explorer'")

        second_parent_W, second_parent_act, s_i = second_parent_selection(good_weights, bad_weights, good_activations, bad_activations, bad_genomes_selection_prob)

        if is_mlp:
            for l in range(len(first_parent_W)):

                if l == 0:
                    act_l = 0
                else:
                    act_l = l - 1

                child_W[i][l], child_act[i][act_l] = cross_over(cp.array(first_parent_W[l]),
                                                                cp.array(second_parent_W[l]),
                                                                first_parent_act[act_l],
                                                                second_parent_act[act_l],
                                                                cross_over_mode=cross_over_mode,
                                                                activation_selection_add_prob=activation_selection_add_prob,
                                                                activation_selection_change_prob=activation_selection_change_prob,
                                                                activation_selection_threshold=activation_selection_threshold,
                                                                bad_genomes_selection_prob=bad_genomes_selection_prob,
                                                                first_parent_fitness=first_parent_fitness,
                                                                fitness_bias=fitness_bias,
                                                                second_parent_fitness=normalized_fitness[s_i],
                                                                weight_evolve=weight_evolve,
                                                                epsilon=epsilon
                                                                )


        else:
            child_W[i], child_act[i] = cross_over(first_parent_W,
                                                  second_parent_W,
                                                  first_parent_act,
                                                  second_parent_act,
                                                  cross_over_mode=cross_over_mode,
                                                  activation_selection_add_prob=activation_selection_add_prob,
                                                  activation_selection_change_prob=activation_selection_change_prob,
                                                  activation_selection_threshold=activation_selection_threshold,
                                                  bad_genomes_selection_prob=bad_genomes_selection_prob,
                                                  first_parent_fitness=first_parent_fitness,
                                                  fitness_bias=fitness_bias,
                                                  second_parent_fitness=normalized_fitness[s_i],
                                                  weight_evolve=weight_evolve,
                                                  epsilon=epsilon
                                                  )

        mutation_prob = random.uniform(0, 1)

        if mutation_prob > bad_genomes_mutation_prob:
            genome_W = good_weights[i]
            genome_act = good_activations[i]

            fitness_index = int(len(good_weights) + i)

        else:
            genome_W = bad_weights[i]
            genome_act = bad_activations[i]

            fitness_index = i

        if is_mlp:
            for l in range(len(genome_W)):

                if l == 0:
                    act_l = 0
                else:
                    act_l = l - 1

                mutated_W[i][l], mutated_act[i][act_l] = mutation(cp.array(genome_W[l]),
                                                                  genome_act[act_l],
                                                                  activation_mutate_prob=activation_mutate_prob,
                                                                  activation_add_prob=activation_mutate_add_prob,
                                                                  activation_delete_prob=activation_mutate_delete_prob,
                                                                  activation_change_prob=activation_mutate_change_prob,
                                                                  weight_mutate_prob=weight_mutate_prob,
                                                                  weight_mutate_threshold=weight_mutate_threshold,
                                                                  genome_fitness=normalized_fitness[fitness_index],
                                                                  activation_mutate_threshold=activation_mutate_threshold,
                                                                  weight_evolve=weight_evolve,
                                                                  epsilon=epsilon
                                                                  )
        else:
            mutated_W[i], mutated_act[i] = mutation(genome_W,
                                                    genome_act,
                                                    activation_mutate_prob=activation_mutate_prob,
                                                    activation_add_prob=activation_mutate_add_prob,
                                                    activation_delete_prob=activation_mutate_delete_prob,
                                                    activation_change_prob=activation_mutate_change_prob,
                                                    weight_mutate_prob=weight_mutate_prob,
                                                    weight_mutate_threshold=weight_mutate_threshold,
                                                    genome_fitness=normalized_fitness[fitness_index],
                                                    activation_mutate_threshold=activation_mutate_threshold,
                                                    weight_evolve=weight_evolve,
                                                    epsilon=epsilon
                                                    )

        if bar_status: progress.update(1)

    if is_mlp:
        for i in range(len(child_W)):
            for j in range(len(child_W[i])):
                child_W[i][j] = cp.array(child_W[i][j], dtype=dtype)

        child_W = list(child_W)
        mutated_W = list(mutated_W)
        weights = child_W + mutated_W
    else:
        weights = cp.vstack((child_W, mutated_W), dtype=dtype)

    activations = child_act + mutated_act

    if save_best_genome:
        weights[0] = best_weight
        activations[0] = best_activations

    ### INFO PRINTING CONSOLE

    if show_info == True:
        print("\nGENERATION:", str(what_gen) + ' FINISHED \n')
        print("*** Configuration Settings ***")
        print("  POPULATION SIZE: ", str(len(weights)))
        print("  STRATEGY: ", strategy)
        print("  CROSS OVER MODE: ", cross_over_mode)
        print("  POLICY: ", policy)
        print("  BAD GENOMES MUTATION PROB: ", str(bad_genomes_mutation_prob))
        print("  GOOD GENOMES MUTATION PROB: ", str(round(1 - bad_genomes_mutation_prob, 2)))
        print("  BAD GENOMES SELECTION PROB: ", str(bad_genomes_selection_prob))
        print("  WEIGHT MUTATE PROB: ", str(weight_mutate_prob))
        print("  WEIGHT MUTATE THRESHOLD: ", str(weight_mutate_threshold))
        print("  ACTIVATION MUTATE PROB: ", str(activation_mutate_prob))
        print("  ACTIVATION MUTATE THRESHOLD: ", str(activation_mutate_threshold))
        print("  ACTIVATION MUTATE ADD PROB: ", str(activation_mutate_add_prob))
        print("  ACTIVATION MUTATE DELETE PROB: ", str(activation_mutate_delete_prob))
        print("  ACTIVATION MUTATE CHANGE PROB: ", str(activation_mutate_change_prob))
        print("  ACTIVATION SELECTION THRESHOLD:", str(activation_selection_threshold))
        print("  ACTIVATION SELECTION ADD PROB: ", str(activation_selection_add_prob))
        print("  ACTIVATION SELECTION CHANGE PROB: ", str(activation_selection_change_prob))
        print("  FITNESS BIAS: ", str(fitness_bias))
        print("  SAVE BEST GENOME: ", str(save_best_genome) + '\n')

        print("*** Performance ***")
        print("  MAX FITNESS: ", str(cp.round(max(fitness), 2)))
        print("  MEAN FITNESS: ", str(cp.round(cp.mean(fitness), 2)))
        print("  MIN FITNESS: ", str(cp.round(min(fitness), 2)) + '\n')

        print("  BEST GENOME ACTIVATION LENGTH: ", str(len(best_activations)))
        print("  PREVIOUS BEST GENOME ACTIVATION LENGTH: ", str(len(best_activations)))
        print("  PREVIOUS BEST GENOME INDEX: ", str(previous_best_genome_index) + '\n')

    if weight_evolve is False: weights = origin_weights

    return weights, activations


def evaluate(Input, weights, activations, is_mlp=False):
    """
    Evaluates the performance of a population of genomes, applying different activation functions
    and weights depending on whether reinforcement learning mode is enabled or not.

    Args:
        Input (list or cupy.ndarray): A list or 2D numpy or cupy array where each element represents
            a genome (A list of input features for each genome, or a single set of input features for one genome).

        weights (list or cupy.ndarray): A list or 2D numpy array of weights corresponding to each genome
            in `x_population`. This determines the strength of connections.

        activations (list or str): A list where each entry represents an activation function
            or a potentiation strategy applied to each genome. If only one
            activation function is used, this can be a single string.
        is_mlp (bool, optional): Evaluate PLAN model or MLP model ? Default: False (PLAN)

    Returns:
        list: A list of outputs corresponding to each genome in the population after applying the respective
            activation function and weights.

    Example:
        ```python
        outputs = evaluate(Input, weights, activations)
        ```

    - The function returns a list of outputs after processing the population, where each element corresponds to
      the output for each genome in population.
    """
    ### THE OUTPUTS ARE RETURNED WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:

    if isinstance(activations, str):
        activations = [activations]
    else:
        activations = [item if isinstance(item, list) else [item] for item in activations]

    if is_mlp:

        layer = Input
        for i in range(len(weights)):
            if i != len(weights) - 1: layer = apply_activation(layer, activations[i])
            layer = layer @ weights[i].T

        return layer

    else:

        Input = apply_activation(Input, activations)
        result = Input @ weights.T

        return result

def cross_over(first_parent_W,
               second_parent_W,
               first_parent_act,
               second_parent_act,
               cross_over_mode,
               activation_selection_add_prob,
               activation_selection_change_prob,
               activation_selection_threshold,
               bad_genomes_selection_prob,
               first_parent_fitness,
               second_parent_fitness,
               fitness_bias,
               weight_evolve,
               epsilon):
    """
    Performs a crossover operation on two sets of weights and activation functions.
    This function combines two individuals (represented by their weights and activation functions)
    to create a new individual by exchanging parts of their weight matrices and activation functions.

    Args:
        first_parent_W (cupy.ndarray): The weight matrix of the first individual (parent).

        second_parent_W (numpy.ndarray): The weight matrix of the second individual (parent).

        first_parent_act (str or list): The activation function(s) of the first individual.

        second_parent_act (str or list): The activation function(s) of the second individual.

        cross_over_mode (str): Determines the crossover method to be used. Options:
            - 'tpm': Two-Point Matrix Crossover, where sub-matrices of weights are swapped between parents.

        activation_selection_add_prob (float): Probability of adding new activation functions
            from the second parent to the child genome.

        activation_selection_change_prob (float): Probability of replacing an activation function in the child genome
            with one from the second parent.

        activation_selection_threshold (int): Determines max how much activaton transferable to child from undominant parent. (Function automaticly determines to min)

        bad_genomes_selection_prob (float): Probability of selecting a "bad" genome for replacement with the offspring.

        first_parent_fitness (float): Fitness score of the first parent.

        second_parent_fitness (float): Fitness score of the second parent.

        fitness_bias (float): A bias factor used to favor fitter parents during crossover operations.

        weight_evolve (bool, optional): Are weights to be evolves or just activation combinations.

        epsilon (float): Small epsilon constant

    Returns:
        tuple: A tuple containing:
            - child_W (numpy.ndarray): The weight matrix of the new individual created by crossover.
            - child_act (list): The list of activation functions of the new individual created by crossover.

    Notes:
        - The crossover is performed based on the selected `cross_over_mode`.
        - In 'tpm' mode, random sub-matrices from the parent weight matrices are swapped.
        - Activation functions from both parents are combined using the probabilities and rates provided.

    Example:
        ```python
        new_weights, new_activations = cross_over(
            first_parent_W=parent1_weights,
            second_parent_W=parent2_weights,
            first_parent_act=parent1_activations,
            second_parent_act=parent2_activations,
            cross_over_mode='tpm',
            activation_selection_add_prob=0.8,
            activation_selection_change_prob=0.5,
            activation_selection_threshold=2,
            bad_genomes_selection_prob=0.7,
            first_parent_fitness=0.9,
            second_parent_fitness=0.85,
            fitness_bias=0.6,
            weight_evolve=True,
            epsilon=cp.finfo(float).eps
        )
        ```
    """

    ### THE GIVEN GENOMES' WEIGHTS ARE RANDOMLY SELECTED AND COMBINED OVER A RANDOM RANGE. SIMILARLY, THEIR ACTIVATIONS ARE COMBINED. A NEW GENOME IS RETURNED WITH THE COMBINED WEIGHTS FIRST, FOLLOWED BY THE ACTIVATIONS:

    start = 0

    row_end = first_parent_W.shape[0]
    col_end = first_parent_W.shape[1]

    total_gene = row_end * col_end
    half_of_gene = int(total_gene / 2)

    decision = dominant_parent_selection(bad_genomes_selection_prob)

    if decision == 'first_parent':
        dominant_parent_W = cp.copy(first_parent_W)
        dominant_parent_act = first_parent_act

        undominant_parent_W = cp.copy(second_parent_W)
        undominant_parent_act = second_parent_act
        succes = second_parent_fitness + epsilon

    elif decision == 'second_parent':
        dominant_parent_W = cp.copy(second_parent_W)
        dominant_parent_act = second_parent_act

        undominant_parent_W = cp.copy(first_parent_W)
        undominant_parent_act = first_parent_act
        succes = first_parent_fitness + epsilon

    if weight_evolve is True:
        while True:

            row_cut_start = int(random.uniform(start, row_end))
            col_cut_start = int(random.uniform(start, col_end))

            row_cut_end = int(random.uniform(start, row_end))
            col_cut_end = int(random.uniform(start, col_end))

            if ((row_cut_end > row_cut_start) and
                (col_cut_end > col_cut_start) and
                (((row_cut_end + 1) - (row_cut_start + 1) * 2) + ((col_cut_end + 1) - (col_cut_start + 1) * 2) <= half_of_gene)):
                break

        selection_bias = random.uniform(0, 1)

        if fitness_bias > selection_bias:
            row_cut_start = math.floor(row_cut_start * succes)
            row_cut_end = math.ceil(row_cut_end * succes)

            col_cut_start = math.floor(col_cut_start * succes)
            col_cut_end = math.ceil(col_cut_end * succes)

        child_W = dominant_parent_W

        if cross_over_mode == 'tpm':
            child_W[row_cut_start:row_cut_end, col_cut_start:col_cut_end] = undominant_parent_W[row_cut_start:row_cut_end, col_cut_start:col_cut_end]

    else: child_W = dominant_parent_W

    if isinstance(dominant_parent_act, str): dominant_parent_act = [dominant_parent_act]
    if isinstance(undominant_parent_act, str): undominant_parent_act = [undominant_parent_act]

    child_act = list(np.copy(dominant_parent_act))

    activation_selection_add_prob = 1 - activation_selection_add_prob # if prob 0.8 (%80) then 1 - 0.8. Because 0-1 random number probably greater than 0.2
    potential_activation_selection_add = random.uniform(0, 1)

    if potential_activation_selection_add > activation_selection_add_prob:

        threshold = abs(activation_selection_threshold / succes)
        new_threshold = threshold

        while True:

            random_index = int(random.uniform(0, len(undominant_parent_act)))
            random_undominant_activation = undominant_parent_act[random_index]

            child_act.append(random_undominant_activation)
            new_threshold += threshold

            if len(dominant_parent_act) > new_threshold:
                pass

            else:
                break

    activation_selection_change_prob = 1 - activation_selection_change_prob
    potential_activation_selection_change_prob = random.uniform(0, 1)

    if potential_activation_selection_change_prob > activation_selection_change_prob:

        threshold = abs(activation_selection_threshold / succes)
        new_threshold = threshold

        while True:

            random_index_undominant = int(random.uniform(0, len(undominant_parent_act)))
            random_index_dominant = int(random.uniform(0, len(dominant_parent_act)))
            random_undominant_activation = undominant_parent_act[random_index_undominant]

            child_act[random_index_dominant] = random_undominant_activation
            new_threshold += threshold

            if len(dominant_parent_act) > new_threshold:
                pass

            else:
                break

    return child_W, child_act


def mutation(weight,
             activations,
             activation_mutate_prob,
             activation_add_prob,
             activation_delete_prob,
             activation_change_prob,
             weight_mutate_prob,
             weight_mutate_threshold,
             genome_fitness,
             activation_mutate_threshold,
             weight_evolve,
             epsilon):
    """
    Performs mutation on the given weight matrix and activation functions.
    - The weight matrix is mutated by randomly changing its values based on the mutation probability.
    - The activation functions are mutated by adding, removing, or replacing them with predefined probabilities.

    Args:
        weight (cupy.ndarray): The weight matrix to mutate.

        activations (list): The list of activation functions to mutate.

        activation_mutate_prob (float): The overall probability of mutating activation functions.

        activation_add_prob (float): Probability of adding a new activation function.

        activation_delete_prob (float): Probability of removing an existing activation function.

        activation_change_prob (float): Probability of replacing an existing activation function with a new one.

        weight_mutate_prob (float): The probability of mutating weight matrix.

        weight_mutate_threshold (float): Determines max how much weight mutaiton operation applying. (Function automaticly determines to min)

        genome_fitness (float): Fitness value of genome

        activation_mutate_threshold (float): Determines max how much activation mutaiton operation applying. (Function automaticly determines to min)

        weight_evolve (bool, optional): Are weights to be mutates or just activation combinations.

        epsilon (float): Small epsilon constant

    Returns:
        tuple: A tuple containing:
            - mutated_weight (numpy.ndarray): The weight matrix after mutation.
            - mutated_activations (list): The list of activation functions after mutation.

    Notes:
        - Weight mutation:
            - Each weight has a chance defined by `weight_mutate_prob` to be altered by adding a random value
              within the range of [0, 1].
        - Activation mutation:
            - If `activation_mutate_prob` is triggered, one or more activation functions can be added, removed,
              or replaced based on their respective probabilities (`activation_add_prob`, `activation_delete_prob`,
              `activation_change_prob`).
        - The mutation probabilities should be chosen carefully to balance exploration and exploitation during
          the optimization process.
    """

    if isinstance(activations, str): activations = [activations]

    if weight_evolve is True:

        weight_mutate_prob = 1 - weight_mutate_prob # if prob 0.8 (%80) then 1 - 0.8. Because 0-1 random number probably greater than 0.2
        potential_weight_mutation = random.uniform(0, 1)

        if potential_weight_mutation > weight_mutate_prob:

            row_end, col_end = weight.shape
            max_threshold = row_end * col_end
            threshold = weight_mutate_threshold / (genome_fitness + epsilon)

            n_mutations = min(int(threshold), max_threshold)

            row_indices = cp.random.randint(0, row_end, size=n_mutations)
            col_indices = cp.random.randint(0, col_end, size=n_mutations)

            new_values = cp.random.uniform(-1, 1, size=n_mutations)

            weight[row_indices, col_indices] = new_values

    activation_mutate_prob = 1 - activation_mutate_prob
    potential_activation_mutation = random.uniform(0, 1)

    if potential_activation_mutation > activation_mutate_prob:

        genome_fitness += epsilon
        threshold = abs(activation_mutate_threshold / genome_fitness)
        max_threshold = len(activations)

        new_threshold = threshold

        except_this = ['spiral', 'circular']
        all_acts = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED

        activation_add_prob = 1 - activation_add_prob
        activation_delete_prob = 1 - activation_delete_prob
        activation_change_prob = 1 - activation_change_prob

        for _ in range(max_threshold):

            potential_activation_add_prob = random.uniform(0, 1)
            potential_activation_delete_prob = random.uniform(0, 1)
            potential_activation_change_prob = random.uniform(0, 1)


            if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:

                random_index = random.randint(0, len(activations)-1)
                activations.pop(random_index)


            if potential_activation_add_prob > activation_add_prob:

                try:

                    random_index_all_act = int(random.uniform(0, len(all_acts)-1))
                    activations.append(all_acts[random_index_all_act])

                except:

                    activation = activations
                    activations = []

                    activations.append(activation)
                    activations.append(all_acts[int(random.uniform(0, len(all_acts)-1))])

            if potential_activation_change_prob > activation_change_prob:

                random_index_all_act = int(random.uniform(0, len(all_acts)-1))
                random_index_genom_act = int(random.uniform(0, len(activations)-1))

                activations[random_index_genom_act] = all_acts[random_index_all_act]

            new_threshold += threshold

            if max_threshold > new_threshold: pass
            else: break

    return weight, activations


def second_parent_selection(good_weights, bad_weights, good_activations, bad_activations, bad_genomes_selection_prob):

    selection_prob = random.uniform(0, 1)
    random_index = int(random.uniform(0, len(good_weights)))

    if selection_prob > bad_genomes_selection_prob:
        second_selected_W = good_weights[random_index]
        second_selected_act = good_activations[random_index]

    else:
        second_selected_W = bad_weights[random_index]
        second_selected_act = bad_activations[random_index]

    return second_selected_W, second_selected_act, random_index


def dominant_parent_selection(bad_genomes_selection_prob):

    selection_prob = random.uniform(0, 1)

    if selection_prob > bad_genomes_selection_prob: decision = 'first_parent'
    else: decision = 'second_parent'

    return decision