pyerualjetwork 4.5.3__py3-none-any.whl → 4.6b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/data_operations.py +30 -1
- pyerualjetwork/data_operations_cuda.py +31 -1
- pyerualjetwork/model_operations.py +171 -101
- pyerualjetwork/model_operations_cuda.py +193 -112
- pyerualjetwork/planeat.py +165 -25
- pyerualjetwork/planeat_cuda.py +170 -30
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6b0.dist-info}/METADATA +1 -1
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6b0.dist-info}/RECORD +11 -11
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6b0.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6b0.dist-info}/top_level.txt +0 -0
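The functional change in this release is an optional MLP mode in the PLANEAT module: define_genomes gains hidden, neurons and activation_functions parameters, and evolver/evaluate gain an is_mlp flag (see the planeat.py and planeat_cuda.py diffs below). A minimal usage sketch against those new signatures follows; the activation names, toy data and fitness scoring are illustrative assumptions, not taken from the diff, and all other evolver arguments are left at their defaults.

import numpy as np
from pyerualjetwork import planeat

# Population of 20 MLP genomes: 2 hidden layers with 16 and 8 neurons.
# 'tanh' is a placeholder name; any activation listed by all_activations() should work here.
weights, activations = planeat.define_genomes(input_shape=4, output_shape=2, population_size=20,
                                              hidden=2, neurons=[16, 8],
                                              activation_functions=['tanh', 'tanh'],
                                              dtype=np.float32)

X = np.random.uniform(-1, 1, (32, 4)).astype(np.float32)   # toy inputs

for gen in range(10):
    # Toy fitness: mean network output per genome; a real task would score predictions instead.
    fitness = np.array([float(planeat.evaluate(X, weights[i], activations[i], is_mlp=True).mean())
                        for i in range(len(weights))])
    weights, activations = planeat.evolver(weights, activations, gen, fitness,
                                           is_mlp=True, show_info=False)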
pyerualjetwork/planeat.py
CHANGED
@@ -17,11 +17,11 @@ import random
 import math
 
 ### LIBRARY IMPORTS ###
-from .data_operations import normalization, non_neg_normalization
+from .data_operations import normalization, non_neg_normalization, split_nested_arrays, reconstruct_nested_arrays
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions import apply_activation, all_activations
 
-def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
+def define_genomes(input_shape, output_shape, population_size, hidden=0, neurons=None, activation_functions=None, dtype=np.float32):
     """
     Initializes a population of genomes, where each genome is represented by a set of weights
     and an associated activation function. Each genome is created with random weights and activation
@@ -35,6 +35,12 @@ def define_genomes(input_shape, output_shape, population_size, dtype=np.float32)
 
     population_size (int): The number of genomes (individuals) in the population.
 
+    hidden (int, optional): If you dont want train PLAN model this parameter represents a hidden layer count for MLP model. Default: 0 (PLAN)
+
+    neurons (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Default: None (PLAN)
+
+    activation_functions (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. Default: None (PLAN) NOTE: THIS EFFECTS HIDDEN LAYERS OUTPUT. NOT OUTPUT LAYER!
+
     dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.
 
     Returns:
@@ -42,36 +48,68 @@ def define_genomes(input_shape, output_shape, population_size, dtype=np.float32)
     - population_weights (numpy.ndarray): A 2D numpy array of shape (population_size, output_shape, input_shape) representing the
       weight matrices for each genome.
     - population_activations (list): A list of activation functions applied to each genome.
-
+
     Notes:
     The weights are initialized randomly within the range [-1, 1].
     Activation functions are selected randomly from a predefined list `all_activations()`.
     The weights for each genome are then modified by applying the corresponding activation function
     and normalized using the `normalization()` function. (Max abs normalization.)
     """
-
-
+    if hidden > 0:
+        population_weights = [[0] * (hidden + 1) for _ in range(population_size)]
+        population_activations = [[0] * (hidden) for _ in range(population_size)]
+
+        if len(neurons) != hidden:
+            raise ValueError('hidden parameter and neurons list length must be equal.')
+
+
+        for i in range(len(population_weights)):
+
+            for l in range(hidden + 1):
+
+                if l == 0:
+                    population_weights[i][l] = np.random.uniform(-1, 1, (neurons[l], input_shape)).astype(dtype)
+
+                elif l == hidden:
+                    population_weights[i][l] = np.random.uniform(-1, 1, (output_shape, neurons[l-1])).astype(dtype)
+
+                else:
+                    population_weights[i][l] = np.random.uniform(-1, 1, (neurons[l], neurons[l-1])).astype(dtype)
+
+                if l != hidden:
+                    population_activations[i][l] = activation_functions[l]
+
+                # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):
 
-
-    activations = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
+                for j in range(population_weights[i][l].shape[0]):
 
-
+                    population_weights[i][l][j,:] = apply_activation(population_weights[i][l][j,:], population_activations[i])
+                    population_weights[i][l][j,:] = normalization(population_weights[i][l][j,:], dtype=dtype)
+
+    else:
+        population_weights = [0] * population_size
+        population_activations = [0] * population_size
+
+        except_this = ['spiral', 'circular']
+        activations = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
 
-
-    population_activations[i] = activations[int(random.uniform(0, len(activations)-1))]
+        for i in range(len(population_weights)):
 
-
+            population_weights[i] = np.random.uniform(-1, 1, (output_shape, input_shape)).astype(dtype)
+            population_activations[i] = activations[int(random.uniform(0, len(activations)-1))]
 
-
+            # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):
 
-
-    population_weights[i][j,:] = normalization(population_weights[i][j,:], dtype=dtype)
+            for j in range(population_weights[i].shape[0]):
 
-
+                population_weights[i][j,:] = apply_activation(population_weights[i][j,:], population_activations[i])
+                population_weights[i][j,:] = normalization(population_weights[i][j,:], dtype=dtype)
+
+    return population_weights, population_activations
 
 
 def evolver(weights,
-            activation_potentiations,
+            activation_potentiations,
             what_gen,
             fitness,
             weight_evolve=True,
@@ -93,6 +131,7 @@ def evolver(weights,
             activation_mutate_threshold=20,
             weight_mutate_threshold=16,
             weight_mutate_prob=1,
+            is_mlp=False,
             dtype=np.float32):
     """
     Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
@@ -105,8 +144,8 @@ def evolver(weights,
     weights (numpy.ndarray): Array of weights for each genome.
         (first returned value of define_genomes function)
 
-    activation_potentiations (list): A list of activation functions for each genome.
-        (second returned value of define_genomes function)
+    activation_potentiations (list[str]): A list of activation functions for each genome.
+        (second returned value of define_genomes function) NOTE!: 'activation potentiations' for PLAN 'activation functions' for MLP.
 
     what_gen (int): The current generation number, used for informational purposes or logging.
 
@@ -179,6 +218,8 @@ def evolver(weights,
 
     activation_selection_threshold (int, optional): Determines max how much activaton transferable to child from undominant parent. (Function automaticly determines to min) Default: 20
 
+    is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)
+
     dtype (numpy.dtype, optional): Data type for the arrays. Default: np.float32.
         Example: np.float64 or np.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].
 
@@ -266,6 +307,34 @@ def evolver(weights,
 
     if weight_evolve is False: origin_weights = np.copy(weights)
 
+    if is_mlp: ### IF EACH GENOME HAVE MORE THEN 1 WEIGHT MATRIX IT IS NOT PLAN MODEL. IT IS MLP MODEL.
+
+        activations = activation_potentiations.copy()
+
+        return mlp_evolver(weights,
+                           activations,
+                           what_gen,
+                           dtype,
+                           fitness,
+                           policy,
+                           bad_genomes_selection_prob,
+                           bar_status,
+                           strategy,
+                           bad_genomes_mutation_prob,
+                           fitness_bias,
+                           cross_over_mode,
+                           activation_mutate_change_prob,
+                           activation_selection_change_prob,
+                           activation_selection_threshold,
+                           activation_mutate_prob,
+                           activation_mutate_threshold,
+                           weight_mutate_threshold,
+                           show_info,
+                           weight_mutate_prob,
+                           )
+
+    if isinstance(weights, list): weights = np.array(weights, dtype=dtype)
+
     ### FITNESS IS SORTED IN ASCENDING ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:
 
     sort_indices = np.argsort(fitness)
@@ -408,7 +477,7 @@ def evolver(weights,
     return weights, activation_potentiations
 
 
-def evaluate(Input, weights, activation_potentiations):
+def evaluate(Input, weights, activation_potentiations, is_mlp=False):
     """
     Evaluates the performance of a population of genomes, applying different activation functions
     and weights depending on whether reinforcement learning mode is enabled or not.
@@ -421,6 +490,8 @@ def evaluate(Input, weights, activation_potentiations):
     activation_potentiations (list or str): A list where each entry represents an activation function
         or a potentiation strategy applied to each genome. If only one
         activation function is used, this can be a single string.
+    is_mlp (bool, optional): Evaluate PLAN model or MLP model ? Default: False (PLAN)
+
     Returns:
     list: A list of outputs corresponding to each genome in the population after applying the respective
         activation function and weights.
@@ -432,19 +503,88 @@ def evaluate(Input, weights, activation_potentiations):
 
     - The function returns a list of outputs after processing the population, where each element corresponds to
       the output for each genome in population.
-    """
+    """
     ### THE OUTPUTS ARE RETURNED, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:
 
+
     if isinstance(activation_potentiations, str):
         activation_potentiations = [activation_potentiations]
     else:
         activation_potentiations = [item if isinstance(item, list) else [item] for item in activation_potentiations]
 
-
-
+    if is_mlp:
+        layer = Input
+        for i in range(len(weights)):
+            if i != len(weights) - 1: layer = apply_activation(layer, activation_potentiations[i])
+            layer = layer @ weights[i].T
+
+        return layer
+
+    else:
 
-
+        Input = apply_activation(Input, activation_potentiations)
+        result = Input @ weights.T
+
+        return result
+
+def mlp_evolver(weights,
+                activations,
+                generation,
+                dtype,
+                fitness,
+                policy,
+                bad_genomes_selection_prob,
+                bar_status,
+                strategy,
+                bad_genomes_mutation_prob,
+                fitness_bias,
+                cross_over_mode,
+                activation_mutate_change_prob,
+                activation_selection_change_prob,
+                activation_selection_threshold,
+                activation_mutate_prob,
+                activation_mutate_threshold,
+                weight_mutate_threshold,
+                show_info,
+                weight_mutate_prob,
+                ):
+
+    weights = split_nested_arrays(weights)
 
+    for layer in range(len(weights)):
+        if show_info == True:
+            if layer == len(weights) - 1:
+                show = True
+            else:
+                show = False
+        else:
+            show = False
+
+        weights[layer], activations = evolver(weights[layer], activations,
+                                              fitness=fitness,
+                                              what_gen=generation,
+                                              show_info=show,
+                                              policy=policy,
+                                              bad_genomes_selection_prob=bad_genomes_selection_prob,
+                                              bar_status=bar_status,
+                                              strategy=strategy,
+                                              bad_genomes_mutation_prob=bad_genomes_mutation_prob,
+                                              fitness_bias=fitness_bias,
+                                              cross_over_mode=cross_over_mode,
+                                              activation_mutate_add_prob=0,
+                                              activation_mutate_delete_prob=0,
+                                              activation_mutate_change_prob=activation_mutate_change_prob,
+                                              activation_selection_add_prob=0,
+                                              activation_selection_change_prob=activation_selection_change_prob,
+                                              activation_selection_threshold=activation_selection_threshold,
+                                              activation_mutate_prob=activation_mutate_prob,
+                                              activation_mutate_threshold=activation_mutate_threshold,
+                                              weight_mutate_threshold=weight_mutate_threshold,
+                                              weight_mutate_prob=weight_mutate_prob,
+                                              dtype=dtype
+                                              )
+
+    return reconstruct_nested_arrays(weights), activations
 
 def cross_over(first_parent_W,
                second_parent_W,
@@ -747,7 +887,7 @@ def mutation(weight,
 
     if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:
 
-        random_index = random.randint(0, len(activations)
+        random_index = random.randint(0, len(activations)-1)
         activations.pop(random_index)
 
 
@@ -784,7 +924,7 @@ def mutation(weight,
 def second_parent_selection(good_weights, bad_weights, good_activations, bad_activations, bad_genomes_selection_prob):
 
     selection_prob = random.uniform(0, 1)
-    random_index = int(random.uniform(0, len(good_weights)
+    random_index = int(random.uniform(0, len(good_weights)-1))
 
     if selection_prob > bad_genomes_selection_prob:
         second_selected_W = good_weights[random_index]
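Under the same assumptions as the sketch above, the CUDA variant in planeat_cuda.py (next diff) exposes the identical API; only the module and the array library change. Illustrative only:

import cupy as cp
from pyerualjetwork import planeat_cuda

weights, activations = planeat_cuda.define_genomes(input_shape=4, output_shape=2, population_size=20,
                                                   hidden=2, neurons=[16, 8],
                                                   activation_functions=['tanh', 'tanh'],
                                                   dtype=cp.float32)

X = cp.random.uniform(-1, 1, (32, 4)).astype(cp.float32)
fitness = cp.array([float(planeat_cuda.evaluate(X, weights[i], activations[i], is_mlp=True).mean())
                    for i in range(len(weights))])
weights, activations = planeat_cuda.evolver(weights, activations, 0, fitness,
                                            is_mlp=True, show_info=False)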
pyerualjetwork/planeat_cuda.py
CHANGED
@@ -19,11 +19,11 @@ import math
 
 
 ### LIBRARY IMPORTS ###
-from .data_operations_cuda import normalization, non_neg_normalization
+from .data_operations_cuda import normalization, non_neg_normalization, split_nested_arrays, reconstruct_nested_arrays
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions_cuda import apply_activation, all_activations
 
-def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32):
+def define_genomes(input_shape, output_shape, population_size, hidden=0, neurons=None, activation_functions=None, dtype=cp.float32):
     """
     Initializes a population of genomes, where each genome is represented by a set of weights
     and an associated activation function. Each genome is created with random weights and activation
@@ -36,6 +36,12 @@ def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32)
     output_shape (int): The number of output features for the neural network.
 
     population_size (int): The number of genomes (individuals) in the population.
+
+    hidden (int, optional): If you dont want train PLAN model this parameter represents a hidden layer count for MLP model. Default: 0 (PLAN)
+
+    neurons (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Default: None (PLAN)
+
+    activation_functions (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. Default: None (PLAN) NOTE: THIS EFFECTS HIDDEN LAYERS OUTPUT. NOT OUTPUT LAYER!
 
     dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16.
 
@@ -51,25 +57,57 @@ def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32)
     The weights for each genome are then modified by applying the corresponding activation function
     and normalized using the `normalization()` function. (Max abs normalization.)
     """
-
-
+    if hidden > 0:
+        population_weights = [[0] * (hidden + 1) for _ in range(population_size)]
+        population_activations = [[0] * (hidden) for _ in range(population_size)]
+
+        if len(neurons) != hidden:
+            raise ValueError('hidden parameter and neurons list length must be equal.')
+
+
+        for i in range(len(population_weights)):
+
+            for l in range(hidden + 1):
+
+                if l == 0:
+                    population_weights[i][l] = cp.random.uniform(-1, 1, (neurons[l], input_shape)).astype(dtype)
+
+                elif l == hidden:
+                    population_weights[i][l] = cp.random.uniform(-1, 1, (output_shape, neurons[l-1])).astype(dtype)
+
+                else:
+                    population_weights[i][l] = cp.random.uniform(-1, 1, (neurons[l], neurons[l-1])).astype(dtype)
+
+                if l != hidden:
+                    population_activations[i][l] = activation_functions[l]
+
+                # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):
+
+                for j in range(population_weights[i][l].shape[0]):
+
+                    population_weights[i][l][j,:] = apply_activation(population_weights[i][l][j,:], population_activations[i])
+                    population_weights[i][l][j,:] = normalization(population_weights[i][l][j,:], dtype=dtype)
+
+    else:
+        population_weights = [0] * population_size
+        population_activations = [0] * population_size
 
-
-
+        except_this = ['spiral', 'circular']
+        activations = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
 
-
+        for i in range(len(population_weights)):
 
-
-
+            population_weights[i] = cp.random.uniform(-1, 1, (output_shape, input_shape)).astype(dtype, copy=False)
+            population_activations[i] = activations[int(random.uniform(0, len(activations)-1))]
 
-
+            # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):
 
-
+            for j in range(population_weights[i].shape[0]):
 
-
-
+                population_weights[i][j,:] = apply_activation(population_weights[i][j,:], population_activations[i])
+                population_weights[i][j,:] = normalization(population_weights[i][j,:], dtype=dtype)
 
-    return
+    return population_weights, population_activations
 
 
 def evolver(weights,
@@ -94,7 +132,8 @@ def evolver(weights,
             activation_mutate_prob=1,
             activation_mutate_threshold=20,
             weight_mutate_threshold=16,
-            weight_mutate_prob=1,
+            weight_mutate_prob=1,
+            is_mlp=False,
             dtype=cp.float32):
     """
     Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
@@ -107,8 +146,8 @@ def evolver(weights,
     weights (cupy.ndarray): Array of weights for each genome.
         (first returned value of define_genomes function)
 
-    activation_potentiations (list): A list of activation functions for each genome.
-        (second returned value of define_genomes function)
+    activation_potentiations (list[str]): A list of activation functions for each genome.
+        (second returned value of define_genomes function) NOTE!: 'activation potentiations' for PLAN 'activation functions' for MLP.
 
     what_gen (int): The current generation number, used for informational purposes or logging.
 
@@ -181,6 +220,8 @@ def evolver(weights,
 
     activation_selection_threshold (int, optional): Determines max how much activaton transferable to child from undominant parent. (Function automaticly determines to min) Default: 20
 
+    is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)
+
     dtype (cupy.dtype): Data type for the arrays. Default: cp.float32.
         Example: cp.float64 or cp.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].
 
@@ -267,6 +308,34 @@ def evolver(weights,
 
     if weight_evolve is False: origin_weights = cp.copy(weights)
 
+    if is_mlp: ### IF EACH GENOME HAVE MORE THEN 1 WEIGHT MATRIX IT IS NOT PLAN MODEL. IT IS MLP MODEL.
+
+        activations = activation_potentiations.copy()
+
+        return mlp_evolver(weights,
+                           activations,
+                           what_gen,
+                           dtype,
+                           fitness,
+                           policy,
+                           bad_genomes_selection_prob,
+                           bar_status,
+                           strategy,
+                           bad_genomes_mutation_prob,
+                           fitness_bias,
+                           cross_over_mode,
+                           activation_mutate_change_prob,
+                           activation_selection_change_prob,
+                           activation_selection_threshold,
+                           activation_mutate_prob,
+                           activation_mutate_threshold,
+                           weight_mutate_threshold,
+                           show_info,
+                           weight_mutate_prob,
+                           )
+
+    if isinstance(weights, list): weights = cp.array(weights, dtype=dtype)
+
     ### FITNESS LIST IS SORTED IN ASCENDING ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:
 
     sort_indices = cp.argsort(fitness)
@@ -408,7 +477,7 @@ def evolver(weights,
     return weights, activation_potentiations
 
 
-def evaluate(Input, weights, activation_potentiations):
+def evaluate(Input, weights, activation_potentiations, is_mlp=False):
     """
     Evaluates the performance of a population of genomes, applying different activation functions
     and weights depending on whether reinforcement learning mode is enabled or not.
@@ -423,6 +492,8 @@ def evaluate(Input, weights, activation_potentiations):
     activation_potentiations (list or str): A list where each entry represents an activation function
         or a potentiation strategy applied to each genome. If only one
         activation function is used, this can be a single string.
+    is_mlp (bool, optional): Evaluate PLAN model or MLP model ? Default: False (PLAN)
+
     Returns:
     list: A list of outputs corresponding to each genome in the population after applying the respective
         activation function and weights.
@@ -442,11 +513,80 @@ def evaluate(Input, weights, activation_potentiations):
     else:
         activation_potentiations = [item if isinstance(item, list) else [item] for item in activation_potentiations]
 
-
-
+    if is_mlp:
+
+        layer = Input
+        for i in range(len(weights)):
+            if i != len(weights) - 1: layer = apply_activation(layer, activation_potentiations[i])
+            layer = layer @ weights[i].T
+
+        return layer
+
+    else:
 
-
+        Input = apply_activation(Input, activation_potentiations)
+        result = Input @ weights.T
+
+        return result
+
+def mlp_evolver(weights,
+                activations,
+                generation,
+                dtype,
+                fitness,
+                policy,
+                bad_genomes_selection_prob,
+                bar_status,
+                strategy,
+                bad_genomes_mutation_prob,
+                fitness_bias,
+                cross_over_mode,
+                activation_mutate_change_prob,
+                activation_selection_change_prob,
+                activation_selection_threshold,
+                activation_mutate_prob,
+                activation_mutate_threshold,
+                weight_mutate_threshold,
+                show_info,
+                weight_mutate_prob,
+                ):
+
+    weights = split_nested_arrays(weights)
 
+    for layer in range(len(weights)):
+        if show_info == True:
+            if layer == len(weights) - 1:
+                show = True
+            else:
+                show = False
+        else:
+            show = False
+
+        weights[layer], activations = evolver(weights[layer], activations,
+                                              fitness=fitness,
+                                              what_gen=generation,
+                                              show_info=show,
+                                              policy=policy,
+                                              bad_genomes_selection_prob=bad_genomes_selection_prob,
+                                              bar_status=bar_status,
+                                              strategy=strategy,
+                                              bad_genomes_mutation_prob=bad_genomes_mutation_prob,
+                                              fitness_bias=fitness_bias,
+                                              cross_over_mode=cross_over_mode,
+                                              activation_mutate_add_prob=0,
+                                              activation_mutate_delete_prob=0,
+                                              activation_mutate_change_prob=activation_mutate_change_prob,
+                                              activation_selection_add_prob=0,
+                                              activation_selection_change_prob=activation_selection_change_prob,
+                                              activation_selection_threshold=activation_selection_threshold,
+                                              activation_mutate_prob=activation_mutate_prob,
+                                              activation_mutate_threshold=activation_mutate_threshold,
+                                              weight_mutate_threshold=weight_mutate_threshold,
+                                              weight_mutate_prob=weight_mutate_prob,
+                                              dtype=dtype
+                                              )
+
+    return reconstruct_nested_arrays(weights), activations
 
 def cross_over(first_parent_W,
                second_parent_W,
@@ -603,7 +743,7 @@ def cross_over(first_parent_W,
 
         while True:
 
-            random_index = int(random.uniform(0, len(undominant_parent_act)
+            random_index = int(random.uniform(0, len(undominant_parent_act)))
             random_undominant_activation = undominant_parent_act[random_index]
 
             child_act.append(random_undominant_activation)
@@ -625,8 +765,8 @@ def cross_over(first_parent_W,
 
         while True:
 
-            random_index_undominant = int(random.uniform(0, len(undominant_parent_act)
-            random_index_dominant = int(random.uniform(0, len(dominant_parent_act)
+            random_index_undominant = int(random.uniform(0, len(undominant_parent_act)))
+            random_index_dominant = int(random.uniform(0, len(dominant_parent_act)))
             random_undominant_activation = undominant_parent_act[random_index_undominant]
 
             child_act[random_index_dominant] = random_undominant_activation
@@ -749,7 +889,7 @@ def mutation(weight,
 
     if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:
 
-        random_index = random.randint(0, len(activations)
+        random_index = random.randint(0, len(activations))
         activations.pop(random_index)
 
 
@@ -757,7 +897,7 @@ def mutation(weight,
 
         try:
 
-            random_index_all_act = int(random.uniform(0, len(all_acts)
+            random_index_all_act = int(random.uniform(0, len(all_acts)))
             activations.append(all_acts[random_index_all_act])
 
         except:
@@ -766,12 +906,12 @@ def mutation(weight,
             activations = []
 
             activations.append(activation)
-            activations.append(all_acts[int(random.uniform(0, len(all_acts)
+            activations.append(all_acts[int(random.uniform(0, len(all_acts)))])
 
     if potential_activation_change_prob > activation_change_prob:
 
-        random_index_all_act = int(random.uniform(0, len(all_acts)
-        random_index_genom_act = int(random.uniform(0, len(activations)
+        random_index_all_act = int(random.uniform(0, len(all_acts)))
+        random_index_genom_act = int(random.uniform(0, len(activations)))
 
         activations[random_index_genom_act] = all_acts[random_index_all_act]
 
@@ -786,7 +926,7 @@ def mutation(weight,
 def second_parent_selection(good_weights, bad_weights, good_activations, bad_activations, bad_genomes_selection_prob):
 
     selection_prob = random.uniform(0, 1)
-    random_index = int(random.uniform(0, len(good_weights)
+    random_index = int(random.uniform(0, len(good_weights)))
 
     if selection_prob > bad_genomes_selection_prob:
         second_selected_W = good_weights[random_index]
{pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6b0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.5.3
+Version: 4.6b0
 Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6b0.dist-info}/RECORD
CHANGED
@@ -1,8 +1,8 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=KLgnJmN6LoSApf-e8QKuNBxyBM3QqRZOSsUEVy5jL58,1279
 pyerualjetwork/activation_functions.py,sha256=Ms0AGBqkJuCA42ht64MSQnO54Td_1eDGquedpoBDVbc,7642
 pyerualjetwork/activation_functions_cuda.py,sha256=5y1Ti3GDfDteQDCUmODwe7tAyDAUlDTKmIikChQ8d6g,7772
-pyerualjetwork/data_operations.py,sha256=
-pyerualjetwork/data_operations_cuda.py,sha256=
+pyerualjetwork/data_operations.py,sha256=TxfpwHWUpJ2E7IVF2uSmigrwpVL_JxrvGPOrMg2lNuI,15981
+pyerualjetwork/data_operations_cuda.py,sha256=5e8EO-XRplSmmXcZJxxEISdxO3297ShsAKHswTh3kGQ,18084
 pyerualjetwork/fitness_functions.py,sha256=urRdeMvUhNgWxD4ZGHCRdQlIf9cTWYMvF3_aVBojRqY,1235
 pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
 pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
@@ -10,16 +10,16 @@ pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqd
 pyerualjetwork/memory_operations.py,sha256=0yCOHcgiNyF4ccMcRlL1Q9F_byG4nzjhmkbpXE_yU6E,13401
 pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
 pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
-pyerualjetwork/model_operations.py,sha256=
-pyerualjetwork/model_operations_cuda.py,sha256=
+pyerualjetwork/model_operations.py,sha256=aARz97TFaVX5lkdKSV-4cp3OT0KSiRUYO7ktpfCmr44,14854
+pyerualjetwork/model_operations_cuda.py,sha256=ZpnqaJYNb8Wyc5htEY0qXcZA4N9LLPDiHEBjrIqvGCI,15745
 pyerualjetwork/plan.py,sha256=UyIvPmvHCHwczlc9KHolE4y6CPEeBfhnRN5yznSbnoM,23028
 pyerualjetwork/plan_cuda.py,sha256=iteqgv7x9Z2Pj4vGOZs6HXS3r0bNaF_smr7ZXaOdRnw,23990
-pyerualjetwork/planeat.py,sha256=
-pyerualjetwork/planeat_cuda.py,sha256=
+pyerualjetwork/planeat.py,sha256=RjTB2GN1Dp6Qbm6w5iDCEIR056KzQc4vyJ06Gce0iWQ,45193
+pyerualjetwork/planeat_cuda.py,sha256=cIkrjT9rjeyKhcrv3x_YXYqr2yeKe8yC1VblFqULg88,45241
 pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
 pyerualjetwork/visualizations.py,sha256=utnX9zQhzmtvBJLOLNGm2jecVVk4zHXABQdjb0XzJac,28352
 pyerualjetwork/visualizations_cuda.py,sha256=gnoaaazZ-nc9E1ImqXrZBRgQ4Rnpi2qh2yGJ2eLKMlE,28807
-pyerualjetwork-4.
-pyerualjetwork-4.
-pyerualjetwork-4.
-pyerualjetwork-4.
+pyerualjetwork-4.6b0.dist-info/METADATA,sha256=xS9cCvZpXLGKrVHK0VSQP49HnPKeN5PoyDSscUXzDDo,7505
+pyerualjetwork-4.6b0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.6b0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.6b0.dist-info/RECORD,,
{pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6b0.dist-info}/WHEEL
{pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6b0.dist-info}/top_level.txt
File without changes