pyerualjetwork 4.5.3__py3-none-any.whl → 4.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/data_operations.py +30 -1
- pyerualjetwork/data_operations_cuda.py +31 -1
- pyerualjetwork/model_operations.py +177 -101
- pyerualjetwork/model_operations_cuda.py +183 -105
- pyerualjetwork/planeat.py +167 -25
- pyerualjetwork/planeat_cuda.py +172 -30
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6.dist-info}/METADATA +1 -1
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6.dist-info}/RECORD +11 -11
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6.dist-info}/top_level.txt +0 -0
pyerualjetwork/planeat.py
CHANGED
@@ -17,11 +17,11 @@ import random
 import math

 ### LIBRARY IMPORTS ###
-from .data_operations import normalization, non_neg_normalization
+from .data_operations import normalization, non_neg_normalization, split_nested_arrays, reconstruct_nested_arrays
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions import apply_activation, all_activations

-def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
+def define_genomes(input_shape, output_shape, population_size, hidden=0, neurons=None, activation_functions=None, dtype=np.float32):
     """
     Initializes a population of genomes, where each genome is represented by a set of weights
     and an associated activation function. Each genome is created with random weights and activation
@@ -35,6 +35,12 @@ def define_genomes(input_shape, output_shape, population_size, dtype=np.float32)

        population_size (int): The number of genomes (individuals) in the population.

+       hidden (int, optional): If you dont want train PLAN model this parameter represents a hidden layer count for MLP model. Default: 0 (PLAN)
+
+       neurons (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Default: None (PLAN)
+
+       activation_functions (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. Default: None (PLAN) NOTE: THIS EFFECTS HIDDEN LAYERS OUTPUT. NOT OUTPUT LAYER!
+
        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.

     Returns:
@@ -42,36 +48,70 @@ def define_genomes(input_shape, output_shape, population_size, dtype=np.float32)

        - population_weights (numpy.ndarray): A 2D numpy array of shape (population_size, output_shape, input_shape) representing the
          weight matrices for each genome.
        - population_activations (list): A list of activation functions applied to each genome.
-
+
     Notes:
        The weights are initialized randomly within the range [-1, 1].
        Activation functions are selected randomly from a predefined list `all_activations()`.
        The weights for each genome are then modified by applying the corresponding activation function
        and normalized using the `normalization()` function. (Max abs normalization.)
     """
-
-
+    if hidden > 0:
+        population_weights = [[0] * (hidden + 1) for _ in range(population_size)]
+        population_activations = [[0] * (hidden) for _ in range(population_size)]
+
+        if len(neurons) != hidden:
+            raise ValueError('hidden parameter and neurons list length must be equal.')
+
+
+        for i in range(len(population_weights)):
+
+            for l in range(hidden + 1):
+
+                if l == 0:
+                    population_weights[i][l] = np.random.uniform(-1, 1, (neurons[l], input_shape)).astype(dtype)
+
+                elif l == hidden:
+                    population_weights[i][l] = np.random.uniform(-1, 1, (output_shape, neurons[l-1])).astype(dtype)
+
+                else:
+                    population_weights[i][l] = np.random.uniform(-1, 1, (neurons[l], neurons[l-1])).astype(dtype)
+
+                if l != hidden:
+                    population_activations[i][l] = activation_functions[l]
+
+                # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):
+
+                for j in range(population_weights[i][l].shape[0]):
+
+                    population_weights[i][l][j,:] = apply_activation(population_weights[i][l][j,:], population_activations[i])
+                    population_weights[i][l][j,:] = normalization(population_weights[i][l][j,:], dtype=dtype)
+
+        return population_weights, population_activations
+
+    else:
+        population_weights = [0] * population_size
+        population_activations = [0] * population_size

-
-
+        except_this = ['spiral', 'circular']
+        activations = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED

-
+        for i in range(len(population_weights)):

-
-
+            population_weights[i] = np.random.uniform(-1, 1, (output_shape, input_shape)).astype(dtype)
+            population_activations[i] = activations[int(random.uniform(0, len(activations)-1))]

-
+            # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):

-
+            for j in range(population_weights[i].shape[0]):

-
-
+                population_weights[i][j,:] = apply_activation(population_weights[i][j,:], population_activations[i])
+                population_weights[i][j,:] = normalization(population_weights[i][j,:], dtype=dtype)

-
+        return np.array(population_weights, dtype=dtype), population_activations


 def evolver(weights,
-            activation_potentiations,
+            activation_potentiations,
             what_gen,
             fitness,
             weight_evolve=True,
@@ -93,6 +133,7 @@ def evolver(weights,
             activation_mutate_threshold=20,
             weight_mutate_threshold=16,
             weight_mutate_prob=1,
+            is_mlp=False,
             dtype=np.float32):
     """
     Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
@@ -105,8 +146,8 @@ def evolver(weights,
        weights (numpy.ndarray): Array of weights for each genome.
           (first returned value of define_genomes function)

-       activation_potentiations (list): A list of activation functions for each genome.
-          (second returned value of define_genomes function)
+       activation_potentiations (list[str]): A list of activation functions for each genome.
+          (second returned value of define_genomes function) NOTE!: 'activation potentiations' for PLAN 'activation functions' for MLP.

        what_gen (int): The current generation number, used for informational purposes or logging.

@@ -179,6 +220,8 @@ def evolver(weights,

        activation_selection_threshold (int, optional): Determines max how much activaton transferable to child from undominant parent. (Function automaticly determines to min) Default: 20

+       is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)
+
        dtype (numpy.dtype, optional): Data type for the arrays. Default: np.float32.
           Example: np.float64 or np.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].

@@ -266,6 +309,34 @@ def evolver(weights,

     if weight_evolve is False: origin_weights = np.copy(weights)

+    if is_mlp: ### IF EACH GENOME HAVE MORE THEN 1 WEIGHT MATRIX IT IS NOT PLAN MODEL. IT IS MLP MODEL.
+
+        activations = activation_potentiations.copy()
+
+        return mlp_evolver(weights,
+                           activations,
+                           what_gen,
+                           dtype,
+                           fitness,
+                           policy,
+                           bad_genomes_selection_prob,
+                           bar_status,
+                           strategy,
+                           bad_genomes_mutation_prob,
+                           fitness_bias,
+                           cross_over_mode,
+                           activation_mutate_change_prob,
+                           activation_selection_change_prob,
+                           activation_selection_threshold,
+                           activation_mutate_prob,
+                           activation_mutate_threshold,
+                           weight_mutate_threshold,
+                           show_info,
+                           weight_mutate_prob,
+                           )
+
+    if isinstance(weights, list): weights = np.array(weights, dtype=dtype)
+
     ### FITNESS IS SORTED IN ASCENDING ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:

     sort_indices = np.argsort(fitness)
@@ -408,7 +479,7 @@ def evolver(weights,
     return weights, activation_potentiations


-def evaluate(Input, weights, activation_potentiations):
+def evaluate(Input, weights, activation_potentiations, is_mlp=False):
     """
     Evaluates the performance of a population of genomes, applying different activation functions
     and weights depending on whether reinforcement learning mode is enabled or not.
@@ -421,6 +492,8 @@ def evaluate(Input, weights, activation_potentiations):
        activation_potentiations (list or str): A list where each entry represents an activation function
                                                or a potentiation strategy applied to each genome. If only one
                                                activation function is used, this can be a single string.
+       is_mlp (bool, optional): Evaluate PLAN model or MLP model ? Default: False (PLAN)
+
     Returns:
        list: A list of outputs corresponding to each genome in the population after applying the respective
              activation function and weights.
@@ -432,19 +505,88 @@ def evaluate(Input, weights, activation_potentiations):

     - The function returns a list of outputs after processing the population, where each element corresponds to
       the output for each genome in population.
-    """
+    """
     ### THE OUTPUTS ARE RETURNED, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:

+
     if isinstance(activation_potentiations, str):
         activation_potentiations = [activation_potentiations]
     else:
         activation_potentiations = [item if isinstance(item, list) else [item] for item in activation_potentiations]

-
-
+    if is_mlp:
+        layer = Input
+        for i in range(len(weights)):
+            if i != len(weights) - 1: layer = apply_activation(layer, activation_potentiations[i])
+            layer = layer @ weights[i].T
+
+        return layer

-
+    else:

+        Input = apply_activation(Input, activation_potentiations)
+        result = Input @ weights.T
+
+        return result
+
+def mlp_evolver(weights,
+                activations,
+                generation,
+                dtype,
+                fitness,
+                policy,
+                bad_genomes_selection_prob,
+                bar_status,
+                strategy,
+                bad_genomes_mutation_prob,
+                fitness_bias,
+                cross_over_mode,
+                activation_mutate_change_prob,
+                activation_selection_change_prob,
+                activation_selection_threshold,
+                activation_mutate_prob,
+                activation_mutate_threshold,
+                weight_mutate_threshold,
+                show_info,
+                weight_mutate_prob,
+                ):
+
+    weights = split_nested_arrays(weights)
+
+    for layer in range(len(weights)):
+        if show_info == True:
+            if layer == len(weights) - 1:
+                show = True
+            else:
+                show = False
+        else:
+            show = False
+
+        weights[layer], activations = evolver(weights[layer], activations,
+                                              fitness=fitness,
+                                              what_gen=generation,
+                                              show_info=show,
+                                              policy=policy,
+                                              bad_genomes_selection_prob=bad_genomes_selection_prob,
+                                              bar_status=bar_status,
+                                              strategy=strategy,
+                                              bad_genomes_mutation_prob=bad_genomes_mutation_prob,
+                                              fitness_bias=fitness_bias,
+                                              cross_over_mode=cross_over_mode,
+                                              activation_mutate_add_prob=0,
+                                              activation_mutate_delete_prob=0,
+                                              activation_mutate_change_prob=activation_mutate_change_prob,
+                                              activation_selection_add_prob=0,
+                                              activation_selection_change_prob=activation_selection_change_prob,
+                                              activation_selection_threshold=activation_selection_threshold,
+                                              activation_mutate_prob=activation_mutate_prob,
+                                              activation_mutate_threshold=activation_mutate_threshold,
+                                              weight_mutate_threshold=weight_mutate_threshold,
+                                              weight_mutate_prob=weight_mutate_prob,
+                                              dtype=dtype
+                                              )
+
+    return reconstruct_nested_arrays(weights), activations

 def cross_over(first_parent_W,
                second_parent_W,
@@ -747,7 +889,7 @@ def mutation(weight,

     if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:

-        random_index = random.randint(0, len(activations)
+        random_index = random.randint(0, len(activations)-1)
         activations.pop(random_index)


@@ -784,7 +926,7 @@ def mutation(weight,
 def second_parent_selection(good_weights, bad_weights, good_activations, bad_activations, bad_genomes_selection_prob):

     selection_prob = random.uniform(0, 1)
-    random_index = int(random.uniform(0, len(good_weights)
+    random_index = int(random.uniform(0, len(good_weights)-1))

     if selection_prob > bad_genomes_selection_prob:
         second_selected_W = good_weights[random_index]
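Taken together, the planeat.py hunks add an MLP path next to the existing PLAN path: define_genomes can now build multi-layer genomes through hidden, neurons and activation_functions, and evolver/evaluate accept is_mlp to route such genomes through the new mlp_evolver helper. The sketch below is a hypothetical usage example inferred only from the signatures shown in this diff, not from the package documentation; the input batch, the 'tanh' activation name (assumed to be a member of all_activations()) and the fitness formula are placeholders.

# Hypothetical example (not from the package docs): driving the new MLP path in planeat.
import numpy as np
from pyerualjetwork import planeat

X = np.random.rand(32, 4).astype(np.float32)            # placeholder input batch
population_size = 10

# MLP genomes: two hidden layers (16 and 8 neurons). Leaving hidden=0 keeps the PLAN behaviour.
weights, activations = planeat.define_genomes(input_shape=4, output_shape=3,
                                               population_size=population_size,
                                               hidden=2, neurons=[16, 8],
                                               activation_functions=['tanh', 'tanh'])

# Score each genome; with is_mlp=True, evaluate() feeds the input through every weight matrix.
fitness = np.zeros(population_size, dtype=np.float32)
for g in range(population_size):
    outputs = planeat.evaluate(X, weights[g], activations[g], is_mlp=True)
    fitness[g] = -np.abs(outputs).mean()                 # placeholder fitness metric

# Evolve; is_mlp=True makes evolver() delegate layer by layer to the new mlp_evolver().
weights, activations = planeat.evolver(weights, activations, what_gen=1,
                                       fitness=fitness, is_mlp=True)

Per the added code, mlp_evolver evolves the population one weight matrix at a time and passes activation_mutate_add_prob=0 and activation_mutate_delete_prob=0 to evolver, so the number of activation functions per MLP genome stays fixed at one per hidden layer.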
pyerualjetwork/planeat_cuda.py
CHANGED
@@ -19,11 +19,11 @@ import math


 ### LIBRARY IMPORTS ###
-from .data_operations_cuda import normalization, non_neg_normalization
+from .data_operations_cuda import normalization, non_neg_normalization, split_nested_arrays, reconstruct_nested_arrays
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions_cuda import apply_activation, all_activations

-def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32):
+def define_genomes(input_shape, output_shape, population_size, hidden=0, neurons=None, activation_functions=None, dtype=cp.float32):
     """
     Initializes a population of genomes, where each genome is represented by a set of weights
     and an associated activation function. Each genome is created with random weights and activation
@@ -36,6 +36,12 @@ def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32)
        output_shape (int): The number of output features for the neural network.

        population_size (int): The number of genomes (individuals) in the population.
+
+       hidden (int, optional): If you dont want train PLAN model this parameter represents a hidden layer count for MLP model. Default: 0 (PLAN)
+
+       neurons (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Default: None (PLAN)
+
+       activation_functions (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. Default: None (PLAN) NOTE: THIS EFFECTS HIDDEN LAYERS OUTPUT. NOT OUTPUT LAYER!

        dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16.

@@ -51,25 +57,59 @@ def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32)
        The weights for each genome are then modified by applying the corresponding activation function
        and normalized using the `normalization()` function. (Max abs normalization.)
     """
-
-
+    if hidden > 0:
+        population_weights = [[0] * (hidden + 1) for _ in range(population_size)]
+        population_activations = [[0] * (hidden) for _ in range(population_size)]
+
+        if len(neurons) != hidden:
+            raise ValueError('hidden parameter and neurons list length must be equal.')
+

-
-        activations = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
+        for i in range(len(population_weights)):

-
+            for l in range(hidden + 1):
+
+                if l == 0:
+                    population_weights[i][l] = cp.random.uniform(-1, 1, (neurons[l], input_shape)).astype(dtype)
+
+                elif l == hidden:
+                    population_weights[i][l] = cp.random.uniform(-1, 1, (output_shape, neurons[l-1])).astype(dtype)
+
+                else:
+                    population_weights[i][l] = cp.random.uniform(-1, 1, (neurons[l], neurons[l-1])).astype(dtype)
+
+                if l != hidden:
+                    population_activations[i][l] = activation_functions[l]
+
+                # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):
+
+                for j in range(population_weights[i][l].shape[0]):
+
+                    population_weights[i][l][j,:] = apply_activation(population_weights[i][l][j,:], population_activations[i])
+                    population_weights[i][l][j,:] = normalization(population_weights[i][l][j,:], dtype=dtype)
+
+        return population_weights, population_activations
+
+    else:
+        population_weights = [0] * population_size
+        population_activations = [0] * population_size
+
+        except_this = ['spiral', 'circular']
+        activations = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED

-
-        population_activations[i] = activations[int(random.uniform(0, len(activations)-1))]
+        for i in range(len(population_weights)):

-
+            population_weights[i] = cp.random.uniform(-1, 1, (output_shape, input_shape)).astype(dtype, copy=False)
+            population_activations[i] = activations[int(random.uniform(0, len(activations)-1))]

-
+            # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):

-
-        population_weights[i][j,:] = normalization(population_weights[i][j,:], dtype=dtype)
+            for j in range(population_weights[i].shape[0]):

-
+                population_weights[i][j,:] = apply_activation(population_weights[i][j,:], population_activations[i])
+                population_weights[i][j,:] = normalization(population_weights[i][j,:], dtype=dtype)
+
+        return cp.array(population_weights, dtype=dtype), population_activations


 def evolver(weights,
@@ -94,7 +134,8 @@ def evolver(weights,
             activation_mutate_prob=1,
             activation_mutate_threshold=20,
             weight_mutate_threshold=16,
-            weight_mutate_prob=1,
+            weight_mutate_prob=1,
+            is_mlp=False,
             dtype=cp.float32):
     """
     Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
@@ -107,8 +148,8 @@ def evolver(weights,
        weights (cupy.ndarray): Array of weights for each genome.
          (first returned value of define_genomes function)

-       activation_potentiations (list): A list of activation functions for each genome.
-          (second returned value of define_genomes function)
+       activation_potentiations (list[str]): A list of activation functions for each genome.
+          (second returned value of define_genomes function) NOTE!: 'activation potentiations' for PLAN 'activation functions' for MLP.

        what_gen (int): The current generation number, used for informational purposes or logging.

@@ -181,6 +222,8 @@ def evolver(weights,

        activation_selection_threshold (int, optional): Determines max how much activaton transferable to child from undominant parent. (Function automaticly determines to min) Default: 20

+       is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)
+
        dtype (cupy.dtype): Data type for the arrays. Default: cp.float32.
           Example: cp.float64 or cp.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].

@@ -267,6 +310,34 @@ def evolver(weights,

     if weight_evolve is False: origin_weights = cp.copy(weights)

+    if is_mlp: ### IF EACH GENOME HAVE MORE THEN 1 WEIGHT MATRIX IT IS NOT PLAN MODEL. IT IS MLP MODEL.
+
+        activations = activation_potentiations.copy()
+
+        return mlp_evolver(weights,
+                           activations,
+                           what_gen,
+                           dtype,
+                           fitness,
+                           policy,
+                           bad_genomes_selection_prob,
+                           bar_status,
+                           strategy,
+                           bad_genomes_mutation_prob,
+                           fitness_bias,
+                           cross_over_mode,
+                           activation_mutate_change_prob,
+                           activation_selection_change_prob,
+                           activation_selection_threshold,
+                           activation_mutate_prob,
+                           activation_mutate_threshold,
+                           weight_mutate_threshold,
+                           show_info,
+                           weight_mutate_prob,
+                           )
+
+    if isinstance(weights, list): weights = cp.array(weights, dtype=dtype)
+
     ### FITNESS LIST IS SORTED IN ASCENDING ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:

     sort_indices = cp.argsort(fitness)
@@ -408,7 +479,7 @@ def evolver(weights,
     return weights, activation_potentiations


-def evaluate(Input, weights, activation_potentiations):
+def evaluate(Input, weights, activation_potentiations, is_mlp=False):
     """
     Evaluates the performance of a population of genomes, applying different activation functions
     and weights depending on whether reinforcement learning mode is enabled or not.
@@ -423,6 +494,8 @@ def evaluate(Input, weights, activation_potentiations):
        activation_potentiations (list or str): A list where each entry represents an activation function
                                                or a potentiation strategy applied to each genome. If only one
                                                activation function is used, this can be a single string.
+       is_mlp (bool, optional): Evaluate PLAN model or MLP model ? Default: False (PLAN)
+
     Returns:
        list: A list of outputs corresponding to each genome in the population after applying the respective
              activation function and weights.
@@ -442,11 +515,80 @@ def evaluate(Input, weights, activation_potentiations):
     else:
         activation_potentiations = [item if isinstance(item, list) else [item] for item in activation_potentiations]

-
-
+    if is_mlp:
+
+        layer = Input
+        for i in range(len(weights)):
+            if i != len(weights) - 1: layer = apply_activation(layer, activation_potentiations[i])
+            layer = layer @ weights[i].T
+
+        return layer
+
+    else:

-
+        Input = apply_activation(Input, activation_potentiations)
+        result = Input @ weights.T
+
+        return result
+
+def mlp_evolver(weights,
+                activations,
+                generation,
+                dtype,
+                fitness,
+                policy,
+                bad_genomes_selection_prob,
+                bar_status,
+                strategy,
+                bad_genomes_mutation_prob,
+                fitness_bias,
+                cross_over_mode,
+                activation_mutate_change_prob,
+                activation_selection_change_prob,
+                activation_selection_threshold,
+                activation_mutate_prob,
+                activation_mutate_threshold,
+                weight_mutate_threshold,
+                show_info,
+                weight_mutate_prob,
+                ):
+
+    weights = split_nested_arrays(weights)

+    for layer in range(len(weights)):
+        if show_info == True:
+            if layer == len(weights) - 1:
+                show = True
+            else:
+                show = False
+        else:
+            show = False
+
+        weights[layer], activations = evolver(weights[layer], activations,
+                                              fitness=fitness,
+                                              what_gen=generation,
+                                              show_info=show,
+                                              policy=policy,
+                                              bad_genomes_selection_prob=bad_genomes_selection_prob,
+                                              bar_status=bar_status,
+                                              strategy=strategy,
+                                              bad_genomes_mutation_prob=bad_genomes_mutation_prob,
+                                              fitness_bias=fitness_bias,
+                                              cross_over_mode=cross_over_mode,
+                                              activation_mutate_add_prob=0,
+                                              activation_mutate_delete_prob=0,
+                                              activation_mutate_change_prob=activation_mutate_change_prob,
+                                              activation_selection_add_prob=0,
+                                              activation_selection_change_prob=activation_selection_change_prob,
+                                              activation_selection_threshold=activation_selection_threshold,
+                                              activation_mutate_prob=activation_mutate_prob,
+                                              activation_mutate_threshold=activation_mutate_threshold,
+                                              weight_mutate_threshold=weight_mutate_threshold,
+                                              weight_mutate_prob=weight_mutate_prob,
+                                              dtype=dtype
+                                              )
+
+    return reconstruct_nested_arrays(weights), activations

 def cross_over(first_parent_W,
                second_parent_W,
@@ -603,7 +745,7 @@ def cross_over(first_parent_W,


         while True:
-            random_index = int(random.uniform(0, len(undominant_parent_act)
+            random_index = int(random.uniform(0, len(undominant_parent_act)))
             random_undominant_activation = undominant_parent_act[random_index]

             child_act.append(random_undominant_activation)
@@ -625,8 +767,8 @@ def cross_over(first_parent_W,

         while True:

-            random_index_undominant = int(random.uniform(0, len(undominant_parent_act)
-            random_index_dominant = int(random.uniform(0, len(dominant_parent_act)
+            random_index_undominant = int(random.uniform(0, len(undominant_parent_act)))
+            random_index_dominant = int(random.uniform(0, len(dominant_parent_act)))
             random_undominant_activation = undominant_parent_act[random_index_undominant]

             child_act[random_index_dominant] = random_undominant_activation
@@ -749,7 +891,7 @@ def mutation(weight,

     if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:

-        random_index = random.randint(0, len(activations)
+        random_index = random.randint(0, len(activations))
         activations.pop(random_index)


@@ -757,7 +899,7 @@ def mutation(weight,

     try:

-        random_index_all_act = int(random.uniform(0, len(all_acts)
+        random_index_all_act = int(random.uniform(0, len(all_acts)))
         activations.append(all_acts[random_index_all_act])

     except:
@@ -766,12 +908,12 @@ def mutation(weight,
         activations = []

         activations.append(activation)
-        activations.append(all_acts[int(random.uniform(0, len(all_acts)
+        activations.append(all_acts[int(random.uniform(0, len(all_acts)))])

     if potential_activation_change_prob > activation_change_prob:

-        random_index_all_act = int(random.uniform(0, len(all_acts)
-        random_index_genom_act = int(random.uniform(0, len(activations)
+        random_index_all_act = int(random.uniform(0, len(all_acts)))
+        random_index_genom_act = int(random.uniform(0, len(activations)))

         activations[random_index_genom_act] = all_acts[random_index_all_act]

@@ -786,7 +928,7 @@ def mutation(weight,
 def second_parent_selection(good_weights, bad_weights, good_activations, bad_activations, bad_genomes_selection_prob):

     selection_prob = random.uniform(0, 1)
-    random_index = int(random.uniform(0, len(good_weights)
+    random_index = int(random.uniform(0, len(good_weights)))

     if selection_prob > bad_genomes_selection_prob:
         second_selected_W = good_weights[random_index]
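planeat_cuda.py receives the same MLP additions on top of CuPy arrays. A minimal CUDA-side sketch under the same assumptions as the CPU example above (CuPy installed, placeholder data and fitness, 'tanh' assumed to be a valid activation name):

# Hypothetical CUDA-side example mirroring the CPU sketch; data and fitness are placeholders.
import cupy as cp
from pyerualjetwork import planeat_cuda

X = cp.random.rand(32, 4).astype(cp.float32)             # placeholder input batch

weights, activations = planeat_cuda.define_genomes(input_shape=4, output_shape=3,
                                                    population_size=10,
                                                    hidden=2, neurons=[16, 8],
                                                    activation_functions=['tanh', 'tanh'],
                                                    dtype=cp.float32)

fitness = cp.zeros(10, dtype=cp.float32)
for g in range(10):
    outputs = planeat_cuda.evaluate(X, weights[g], activations[g], is_mlp=True)
    fitness[g] = -cp.abs(outputs).mean()                  # placeholder fitness metric

weights, activations = planeat_cuda.evolver(weights, activations, what_gen=1,
                                            fitness=fitness, is_mlp=True)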
{pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.5.3
+Version: 4.6
 Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6.dist-info}/RECORD
CHANGED
@@ -1,8 +1,8 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=x-t4HMHrI5Jkh28DDHqIbhtTCHe6IaNhH1zLkC38phI,1277
 pyerualjetwork/activation_functions.py,sha256=Ms0AGBqkJuCA42ht64MSQnO54Td_1eDGquedpoBDVbc,7642
 pyerualjetwork/activation_functions_cuda.py,sha256=5y1Ti3GDfDteQDCUmODwe7tAyDAUlDTKmIikChQ8d6g,7772
-pyerualjetwork/data_operations.py,sha256=
-pyerualjetwork/data_operations_cuda.py,sha256=
+pyerualjetwork/data_operations.py,sha256=TxfpwHWUpJ2E7IVF2uSmigrwpVL_JxrvGPOrMg2lNuI,15981
+pyerualjetwork/data_operations_cuda.py,sha256=5e8EO-XRplSmmXcZJxxEISdxO3297ShsAKHswTh3kGQ,18084
 pyerualjetwork/fitness_functions.py,sha256=urRdeMvUhNgWxD4ZGHCRdQlIf9cTWYMvF3_aVBojRqY,1235
 pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
 pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
@@ -10,16 +10,16 @@ pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqd
 pyerualjetwork/memory_operations.py,sha256=0yCOHcgiNyF4ccMcRlL1Q9F_byG4nzjhmkbpXE_yU6E,13401
 pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
 pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
-pyerualjetwork/model_operations.py,sha256=
-pyerualjetwork/model_operations_cuda.py,sha256=
+pyerualjetwork/model_operations.py,sha256=cL0dGhsRxVnftNgMoghU03prw32czOgdSJM00jGASEk,15174
+pyerualjetwork/model_operations_cuda.py,sha256=70SN64I6NnJzUm0IrOwHSw93oS4j_Vhi7pkk540DMXM,15780
 pyerualjetwork/plan.py,sha256=UyIvPmvHCHwczlc9KHolE4y6CPEeBfhnRN5yznSbnoM,23028
 pyerualjetwork/plan_cuda.py,sha256=iteqgv7x9Z2Pj4vGOZs6HXS3r0bNaF_smr7ZXaOdRnw,23990
-pyerualjetwork/planeat.py,sha256=
-pyerualjetwork/planeat_cuda.py,sha256=
+pyerualjetwork/planeat.py,sha256=hZIzDbdRjyCA-wdraD0yJyG-Y8J2KadEqlITs-M_jPQ,45281
+pyerualjetwork/planeat_cuda.py,sha256=uOvhTxG36jVu8_uHN8jSxGQqbwpSIPKqbXT1sFl0kU8,45326
 pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
 pyerualjetwork/visualizations.py,sha256=utnX9zQhzmtvBJLOLNGm2jecVVk4zHXABQdjb0XzJac,28352
 pyerualjetwork/visualizations_cuda.py,sha256=gnoaaazZ-nc9E1ImqXrZBRgQ4Rnpi2qh2yGJ2eLKMlE,28807
-pyerualjetwork-4.
-pyerualjetwork-4.
-pyerualjetwork-4.
-pyerualjetwork-4.
+pyerualjetwork-4.6.dist-info/METADATA,sha256=aPR80cOeYMf7cRbWopJqwMUzvvZqvZOouaGV__rKu8I,7503
+pyerualjetwork-4.6.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.6.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.6.dist-info/RECORD,,