pyerualjetwork 3.3.4__py3-none-any.whl → 4.1.9__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in the public registry. It is provided for informational purposes only.
- pyerualjetwork/__init__.py +7 -16
- pyerualjetwork/activation_functions.py +120 -146
- pyerualjetwork/activation_functions_cuda.py +341 -0
- pyerualjetwork/data_operations.py +79 -77
- pyerualjetwork/data_operations_cuda.py +461 -0
- pyerualjetwork/help.py +4 -4
- pyerualjetwork/loss_functions_cuda.py +21 -0
- pyerualjetwork/memory_operations.py +298 -0
- pyerualjetwork/metrics_cuda.py +163 -0
- pyerualjetwork/model_operations.py +81 -23
- pyerualjetwork/model_operations_cuda.py +421 -0
- pyerualjetwork/plan.py +229 -210
- pyerualjetwork/plan_cuda.py +696 -0
- pyerualjetwork/planeat.py +60 -43
- pyerualjetwork/planeat_cuda.py +744 -0
- pyerualjetwork/visualizations.py +153 -129
- pyerualjetwork/visualizations_cuda.py +826 -0
- {pyerualjetwork-3.3.4.dist-info → pyerualjetwork-4.1.9.dist-info}/METADATA +45 -21
- pyerualjetwork-4.1.9.dist-info/RECORD +24 -0
- pyerualjetwork-3.3.4.dist-info/RECORD +0 -15
- {pyerualjetwork-3.3.4.dist-info → pyerualjetwork-4.1.9.dist-info}/WHEEL +0 -0
- {pyerualjetwork-3.3.4.dist-info → pyerualjetwork-4.1.9.dist-info}/top_level.txt +0 -0
pyerualjetwork/planeat.py
CHANGED
@@ -17,10 +17,10 @@ from tqdm import tqdm
 ### LIBRARY IMPORTS ###
 from .plan import feed_forward
 from .data_operations import normalization
-from .ui import loading_bars
+from .ui import loading_bars, initialize_loading_bar
 from .activation_functions import apply_activation, all_activations

-def define_genomes(input_shape, output_shape, population_size):
+def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
 """
 Initializes a population of genomes, where each genome is represented by a set of weights
 and an associated activation function. Each genome is created with random weights and activation
@@ -34,6 +34,8 @@ def define_genomes(input_shape, output_shape, population_size):

 population_size (int): The number of genomes (individuals) in the population.

+dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+
 Returns:
 tuple: A tuple containing:
 - population_weights (numpy.ndarray): A 2D numpy array of shape (population_size, output_shape, input_shape) representing the
@@ -54,7 +56,7 @@ def define_genomes(input_shape, output_shape, population_size):

 for i in range(len(population_weights)):

-population_weights[i] = np.random.uniform(-1, 1, (output_shape, input_shape))
+population_weights[i] = np.random.uniform(-1, 1, (output_shape, input_shape)).astype(dtype)
 population_activations[i] = activations[int(random.uniform(0, len(activations)-1))]

 # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):
@@ -62,12 +64,12 @@ def define_genomes(input_shape, output_shape, population_size):
 for j in range(population_weights[i].shape[0]):

 population_weights[i][j,:] = apply_activation(population_weights[i][j,:], population_activations[i])
-population_weights[i][j,:] = normalization(population_weights[i][j,:])
+population_weights[i][j,:] = normalization(population_weights[i][j,:], dtype=dtype)

-return np.array(population_weights), population_activations
+return np.array(population_weights, dtype=dtype), population_activations


-def evolve(weights, activation_potentiations, what_gen,
+def evolve(weights, activation_potentiations, what_gen, fitness, show_info=False, strategy='cross_over', bar_status=True, policy='normal_selective', target_fitness='max', mutations=True, bad_genoms_mutation_prob=None, activation_mutate_prob=0.5, save_best_genom=True, cross_over_mode='tpm', activation_add_prob=0.5, activation_delete_prob=0.5, activation_change_prob=0.5, weight_mutate_prob=1, weight_mutate_rate=32, activation_selection_add_prob=0.7, activation_selection_change_prob=0.5, activation_selection_rate=2, dtype=np.float32):
 """
 Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
 The function modifies the population's weights and activation functions based on a specified policy, mutation probabilities, and strategy.
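The `define_genomes` change above only threads the new `dtype` argument through weight creation, and the expanded `evolve` signature accepts the same argument. A minimal `define_genomes` call sketch, assuming the 4.1.x package is installed and importable as `pyerualjetwork.planeat` (shapes and population size below are illustrative):

```python
import numpy as np
from pyerualjetwork import planeat  # import path assumed from this package layout

# Population size should be even so evolve() can split best/bad halves later.
weights, activations = planeat.define_genomes(input_shape=8, output_shape=4,
                                               population_size=100, dtype=np.float32)

print(weights.shape, weights.dtype)  # (100, 4, 8) float32
print(activations[0])                # one randomly picked activation name per genome
```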
@@ -81,8 +83,8 @@ Args:

 what_gen (int): The current generation number, used for informational purposes or logging.

-
-The array is used to rank the genomes based on their performance. PLANEAT maximizes
+fitness (numpy.ndarray): A 1D array containing the fitness values of each genome.
+The array is used to rank the genomes based on their performance. PLANEAT maximizes or minimizes this fitness for looking 'target' hyperparameter.

 show_info (bool, optional): If True, prints information about the current generation and the
 maximum reward obtained. Also shows the current configuration. Default is False.
@@ -94,12 +96,16 @@ Args:
 (PLAN feature, similar to arithmetic crossover but different.)
 Default is 'cross_over'.

+bar_status (bool, optional): Loading bar status during evolving process of genomes. True or False. Default: True
+
 policy (str, optional): The selection policy that governs how genomes are selected for reproduction. Options:
 - 'normal_selective': Normal selection based on reward, where a portion of the bad genes are discarded.
 - 'more_selective': A more selective policy, where fewer bad genes survive.
 - 'less_selective': A less selective policy, where more bad genes survive.
 Default is 'normal_selective'.

+target_fitness (str, optional): Target fitness strategy for PLANEAT optimization. ('max' for machine learning, 'min' for machine unlearning.) Default: 'max'
+
 mutations (bool, optional): If True, mutations are applied to the bad genomes and potentially
 to the best genomes as well. Default is True.

@@ -137,13 +143,15 @@ Args:
 WARNING: if you don't understand do NOT change this value. Default is 32.

 activation_selection_add_prob (float, optional): The probability of adding an existing activation function for cross over.
-from the genome. Must be in the range [0, 1]. Default is 0.
+from the genome. Must be in the range [0, 1]. Default is 0.7. (WARNING! More higher values make models more complex. For fast training rise this value.)

 activation_selection_change_prob (float, optional): The probability of changing an activation function in the genome for cross over.
 Must be in the range [0, 1]. Default is 0.5.

 activation_selection_rate (int, optional): If the activation list of a good genome is smaller than the value entered here, only one activation will undergo a crossover operation. In other words, this parameter controls the model complexity. Default is 2.

+dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+
 Raises:
 ValueError:
 - If `policy` is not one of the specified values ('normal_selective', 'more_selective', 'less_selective').
@@ -159,7 +167,7 @@ Returns:

 Notes:
 - **Selection Process**:
-- The genomes are sorted by their fitness (based on `
+- The genomes are sorted by their fitness (based on `fitness`), and then split into "best" and "bad" halves.
 - The best genomes are retained, and the bad genomes are modified based on the selected strategy.

 - **Crossover and Potentiation Strategies**:
@@ -171,13 +179,13 @@ Notes:
 - `bad_genoms_mutation_prob` determines the probability of applying mutations to the bad genomes.
 - If `activation_mutate_prob` is provided, activation function mutations are applied to the genomes based on this probability.

-- **Population Size**: The population size must be an even number to properly split the best and bad genomes. If `
+- **Population Size**: The population size must be an even number to properly split the best and bad genomes. If `fitness` has an odd length, an error is raised.

 - **Logging**: If `show_info=True`, the current generation and the maximum reward from the population are printed for tracking the learning progress.

 Example:
 ```python
-weights, activation_potentiations =
+weights, activation_potentiations = planeat.evolve(weights, activation_potentiations, 1, fitness, show_info=True, strategy='cross_over', policy='normal_selective')
 ```

 - The function returns the updated weights and activations after processing based on the chosen strategy, policy, and mutation parameters.
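The one-line docstring example above is easiest to read inside a whole generation loop. The sketch below is an assumption-laden illustration, not the library's documented recipe: it assumes the `pyerualjetwork.planeat` import path, assumes that `evaluate` pairs `x_population[i]` with genome `i` (as its loop suggests), and uses a placeholder fitness score.

```python
import numpy as np
from pyerualjetwork import planeat  # assumed import path

rng = np.random.default_rng(0)
POP = 50  # must be even, per the ValueError added further down in this diff

weights, acts = planeat.define_genomes(input_shape=8, output_shape=4,
                                       population_size=POP, dtype=np.float32)

for gen in range(1, 21):
    # One observation per genome; purely illustrative random inputs.
    x_population = [rng.random(8).astype(np.float32) for _ in range(POP)]
    outputs = planeat.evaluate(x_population, weights, acts, dtype=np.float32)

    # Placeholder fitness: any 1D array with one score per genome works here.
    fitness = np.array([float(np.max(o)) for o in outputs], dtype=np.float32)

    weights, acts = planeat.evolve(weights, acts, gen, fitness,
                                   show_info=(gen == 20),
                                   strategy='cross_over',
                                   policy='normal_selective',
                                   target_fitness='max',  # 'min' inverts the ranking
                                   dtype=np.float32)
```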
@@ -215,17 +223,19 @@ Example:
 if not isinstance(activation_mutate_prob, float) or activation_mutate_prob < 0 or activation_mutate_prob > 1:
 raise ValueError("activation_mutate_prob parameter must be float and 0-1 range")

-if len(
-slice_center = int(len(
+if len(fitness) % 2 == 0:
+slice_center = int(len(fitness) / 2)

 else:
-raise ValueError("
+raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")

-
+
+### FITNESS IS SORTED IN ASCENDING (OR DESCENDING) ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:

-
+if target_fitness == 'max': sort_indices = np.argsort(fitness)
+elif target_fitness == 'min': sort_indices = np.argsort(-fitness)

-
+fitness = fitness[sort_indices]
 weights = weights[sort_indices]

 activation_potentiations = [activation_potentiations[i] for i in sort_indices]
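The new `target_fitness` switch only changes the sort key: `np.argsort(fitness)` (ascending) for `'max'` leaves the fittest genome at the last index, while `np.argsort(-fitness)` for `'min'` leaves the lowest fitness last. A standalone illustration of that ranking step:

```python
import numpy as np

fitness = np.array([0.2, 0.9, 0.5, 0.1], dtype=np.float32)

idx_max = np.argsort(fitness)    # target_fitness='max' -> [3 0 2 1]
idx_min = np.argsort(-fitness)   # target_fitness='min' -> [1 2 0 3]

print(fitness[idx_max])  # [0.1 0.2 0.5 0.9]  best genome ends up at the last index
print(fitness[idx_min])  # [0.9 0.5 0.2 0.1]  lowest fitness ends up at the last index
```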
@@ -245,7 +255,9 @@ Example:

 bar_format = loading_bars()[0]

-
+if bar_status: progress = initialize_loading_bar(len(bad_weights), desc="GENERATION: " + str(what_gen), bar_format=bar_format, ncols=50)
+
+for i in range(len(bad_weights)):

 if policy == 'normal_selective':

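`initialize_loading_bar` itself is not shown in this diff; the call above suggests it is a thin wrapper around `tqdm`. A hypothetical stand-in with the same keyword arguments, only to show how the `bar_status` flag and the later `progress.update(1)` call fit together:

```python
from tqdm import tqdm

def initialize_loading_bar_sketch(total, desc, bar_format, ncols):
    # Hypothetical equivalent of pyerualjetwork.ui.initialize_loading_bar.
    return tqdm(total=total, desc=desc, bar_format=bar_format, ncols=ncols)

bar_status = True
if bar_status:
    progress = initialize_loading_bar_sketch(total=100, desc="GENERATION: 1",
                                             bar_format="{desc} {bar} {percentage:3.0f}%",
                                             ncols=50)
for _ in range(100):
    if bar_status: progress.update(1)  # mirrors the call added near the end of the loop
if bar_status: progress.close()
```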
@@ -254,7 +266,7 @@ Example:


 elif strategy == 'potentiate':
-bad_weights[i], bad_activations[i] = potentiate(best_weight, best_weights[i], best_activations=best_activation, good_activations=best_activations[i])
+bad_weights[i], bad_activations[i] = potentiate(best_weight, best_weights[i], best_activations=best_activation, good_activations=best_activations[i], dtype=dtype)


 if mutations is True:
@@ -263,10 +275,10 @@ Example:

 if mutation_prob > bad_genoms_mutation_prob:
 if (save_best_genom == True and not np.array_equal(best_weights[i], best_weight)) or save_best_genom == False:
-best_weights[i], best_activations[i] = mutation(best_weights[i], best_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate)
+best_weights[i], best_activations[i] = mutation(best_weights[i], best_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate, dtype=dtype)

 elif mutation_prob < bad_genoms_mutation_prob:
-bad_weights[i], bad_activations[i] = mutation(bad_weights[i], bad_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate)
+bad_weights[i], bad_activations[i] = mutation(bad_weights[i], bad_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate, dtype=dtype)

 if policy == 'more_selective':

@@ -274,7 +286,7 @@ Example:
 bad_weights[i], bad_activations[i] = cross_over(best_weight, best_weights[i], best_activations=best_activation, good_activations=best_activations[i], cross_over_mode=cross_over_mode, activation_selection_add_prob=activation_selection_add_prob, activation_selection_change_prob=activation_selection_change_prob, activation_selection_rate=activation_selection_rate)

 elif strategy == 'potentiate':
-bad_weights[i], bad_activations[i] = potentiate(best_weight, best_weights[i], best_activations=best_activation, good_activations=best_activations[i])
+bad_weights[i], bad_activations[i] = potentiate(best_weight, best_weights[i], best_activations=best_activation, good_activations=best_activations[i], dtype=dtype)

 if mutations is True:

@@ -282,10 +294,10 @@ Example:

 if mutation_prob > bad_genoms_mutation_prob:
 if (save_best_genom == True and not np.array_equal(best_weights[i], best_weight)) or save_best_genom == False:
-best_weights[i], best_activations[i] = mutation(best_weights[i], best_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate)
+best_weights[i], best_activations[i] = mutation(best_weights[i], best_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate, dtype=dtype)

 elif mutation_prob < bad_genoms_mutation_prob:
-bad_weights[i], bad_activations[i] = mutation(bad_weights[i], bad_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate)
+bad_weights[i], bad_activations[i] = mutation(bad_weights[i], bad_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate, dtype=dtype)



@@ -305,11 +317,12 @@ Example:

 if mutation_prob > bad_genoms_mutation_prob:
 if (save_best_genom == True and not np.array_equal(best_weights[i], best_weight)) or save_best_genom == False:
-best_weights[i], best_activations[i] = mutation(best_weights[i], best_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate)
+best_weights[i], best_activations[i] = mutation(best_weights[i], best_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate, dtype=dtype)

 elif mutation_prob < bad_genoms_mutation_prob:
-bad_weights[i], bad_activations[i] = mutation(bad_weights[i], bad_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate)
-
+bad_weights[i], bad_activations[i] = mutation(bad_weights[i], bad_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate, dtype=dtype)
+
+if bar_status: progress.update(1)

 weights = np.vstack((bad_weights, best_weights))
 activation_potentiations = bad_activations + best_activations
@@ -340,18 +353,18 @@ Example:
 print(" ACTIVATION SELECTION RATE (THRESHOLD VALUE FOR SINGLE CROSS OVER):", str(activation_selection_rate) + '\n')

 print("*** Performance ***")
-print(" MAX
-print(" MEAN
-print(" MIN
+print(" MAX FITNESS: ", str(round(max(fitness), 2)))
+print(" MEAN FITNESS: ", str(round(np.mean(fitness), 2)))
+print(" MIN FITNESS: ", str(round(min(fitness), 2)) + '\n')

 print(" BEST GENOME INDEX: ", str(len(weights)-1))
 print(" NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')


-return
+return weights, activation_potentiations


-def evaluate(x_population, weights, activation_potentiations, rl_mode=False):
+def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dtype=np.float32):
 """
 Evaluates the performance of a population of genomes, applying different activation functions
 and weights depending on whether reinforcement learning mode is enabled or not.
@@ -367,6 +380,8 @@ def evaluate(x_population, weights, activation_potentiations, rl_mode=False):
 rl_mode (bool, optional): If True, reinforcement learning mode is activated, this accepts x_population is a single genom. (Also weights and activation_potentations a single genomes part.)
 Default is False.

+dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+
 Returns:
 list: A list of outputs corresponding to each genome in the population after applying the respective
 activation function and weights.
@@ -397,7 +412,7 @@ def evaluate(x_population, weights, activation_potentiations, rl_mode=False):
 ### THE OUTPUTS ARE RETURNED AS A PYTHON LIST, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:

 if rl_mode == True:
-Input = np.array(x_population)
+Input = np.array(x_population, copy=False, dtype=dtype)
 Input = Input.ravel()

 if isinstance(activation_potentiations, str):
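The RL-mode branch now builds the input with `copy=False` and an explicit dtype. With NumPy, that only avoids a copy when the data already has the requested dtype and layout; note that NumPy 2.0 raises an error when `copy=False` would force a copy, so `np.asarray` is the forward-compatible spelling of the same intent. A small illustration:

```python
import numpy as np

obs = np.arange(8, dtype=np.float32)

same = np.asarray(obs, dtype=np.float32)       # same dtype: no copy, same object
print(same is obs)                             # True

converted = np.asarray(obs, dtype=np.float16)  # different dtype: a converted copy
print(converted.dtype, converted is obs)       # float16 False
```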
@@ -409,7 +424,7 @@ def evaluate(x_population, weights, activation_potentiations, rl_mode=False):
 outputs = [0] * len(x_population)
 for i, genome in enumerate(x_population):

-Input = np.array(genome)
+Input = np.array(genome, copy=False)
 Input = Input.ravel()

 if isinstance(activation_potentiations[i], str):
@@ -543,7 +558,7 @@ def cross_over(best_weight, good_weight, best_activations, good_activations, cro

 return new_weight, new_activations

-def potentiate(best_weight, good_weight, best_activations, good_activations):
+def potentiate(best_weight, good_weight, best_activations, good_activations, dtype=np.float32):
 """
 Combines two sets of weights and activation functions by adding the weight matrices and
 concatenating the activation functions. The resulting weight matrix is normalized. (Max abs normalization.)
@@ -553,6 +568,7 @@ def potentiate(best_weight, good_weight, best_activations, good_activations):
 good_weight (numpy.ndarray): The weight matrix of the second individual (parent).
 best_activations (str or list): The activation function(s) of the first individual.
 good_activations (str or list): The activation function(s) of the second individual.
+dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

 Returns:
 tuple: A tuple containing:
@@ -566,7 +582,7 @@ def potentiate(best_weight, good_weight, best_activations, good_activations):
 """

 new_weight = best_weight + good_weight
-new_weight = normalization(new_weight)
+new_weight = normalization(new_weight, dtype=dtype)

 if isinstance(best_activations, str):
 best = [best_activations]
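`potentiate` is documented above as adding the two parents' weight matrices and max-abs normalizing the sum; the diff merely threads `dtype` into that normalization. A standalone sketch of the arithmetic, under the assumption that `data_operations.normalization` is a max-abs scaler:

```python
import numpy as np

def maxabs_normalization(w, dtype=np.float32):
    # Assumed behaviour of normalization(): scale so the largest magnitude becomes 1.
    w = w.astype(dtype, copy=False)
    return w / np.max(np.abs(w))

best_weight = np.array([[3.0, -1.0]], dtype=np.float32)
good_weight = np.array([[1.0,  0.0]], dtype=np.float32)

new_weight = maxabs_normalization(best_weight + good_weight, dtype=np.float32)
print(new_weight)  # [[ 1.   -0.25]]  values stay within [-1, 1]
```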
@@ -584,7 +600,7 @@ def potentiate(best_weight, good_weight, best_activations, good_activations):

 return new_weight, new_activations

-def mutation(weight, activations, activation_mutate_prob, activation_add_prob, activation_delete_prob, activation_change_prob, weight_mutate_prob, threshold):
+def mutation(weight, activations, activation_mutate_prob, activation_add_prob, activation_delete_prob, activation_change_prob, weight_mutate_prob, threshold, dtype=np.float32):
 """
 Performs mutation on the given weight matrix and activation functions.
 - The weight matrix is mutated by randomly changing its values based on the mutation probability.
@@ -599,6 +615,7 @@ def mutation(weight, activations, activation_mutate_prob, activation_add_prob, a
 activation_change_prob (float): Probability of replacing an existing activation function with a new one.
 weight_mutate_prob (float): The probability of mutating weight matrix.
 threshold (float): If the value you enter here is equal to the result of input layer * output layer, only a single weight will be mutated during each mutation process. If the value you enter here is half of the result of input layer * output layer, two weights in the weight matrix will be mutated.
+dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

 Returns:
 tuple: A tuple containing:
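The `threshold` argument (exposed on `evolve` as `weight_mutate_rate`) is described above through an inverse relation: roughly `(rows * cols) / threshold` weights are touched per mutation call. A quick numeric reading of that docstring (an interpretation, not the library's exact code):

```python
rows, cols = 10, 32                 # output_shape x input_shape of one genome
for threshold in (320, 160, 32):
    n_mutated = max(1, (rows * cols) // threshold)
    print(f"threshold={threshold:>3} -> ~{n_mutated} weight(s) mutated per call")
# 320 (== rows*cols) -> 1, 160 (half of it) -> 2, the default 32 -> 10
```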
@@ -672,7 +689,7 @@ def mutation(weight, activations, activation_mutate_prob, activation_add_prob, a

 weight[i,:] = apply_activation(weight[i,:], activations[-1])

-weight = normalization(weight)
+weight = normalization(weight, dtype=dtype)

 except:

@@ -686,7 +703,7 @@ def mutation(weight, activations, activation_mutate_prob, activation_add_prob, a

 weight[i,:] = apply_activation(weight[i,:], activations[-1])

-weight = normalization(weight)
+weight = normalization(weight, dtype=dtype)

 if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:

@@ -699,7 +716,7 @@ def mutation(weight, activations, activation_mutate_prob, activation_add_prob, a
 weight[i,:] -= wc[i,:]

 activations.pop(random_index)
-weight = normalization(weight)
+weight = normalization(weight, dtype=dtype)


 if potential_activation_change_prob > activation_change_prob:
@@ -715,12 +732,12 @@ def mutation(weight, activations, activation_mutate_prob, activation_add_prob, a
 wc[i,:] = apply_activation(wc[i,:], activations[random_index_genom_act])
 weight[i,:] -= wc[i,:]

-weight = normalization(weight)
+weight = normalization(weight, dtype=dtype)

 for i in range(weight.shape[0]):

 weight[i,:] = apply_activation(weight[i,:], activations[random_index_genom_act])

-weight = normalization(weight)
+weight = normalization(weight, dtype=dtype)

 return weight, activations
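Across all of these functions the new `dtype` argument mainly trades precision for memory, which is what the "fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices" note in the docstrings is getting at. A quick footprint comparison for a hypothetical population:

```python
import numpy as np

shape = (1000, 64, 128)  # 1000 genomes, each with a 64x128 weight matrix (illustrative)
for dt in (np.float16, np.float32, np.float64):
    w = np.zeros(shape, dtype=dt)
    print(dt.__name__, w.nbytes / 1e6, "MB")
# float16 16.384 MB, float32 32.768 MB, float64 65.536 MB
```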