pyerualjetwork 4.5.2b0__py3-none-any.whl → 4.6b0__py3-none-any.whl
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/data_operations.py +30 -1
- pyerualjetwork/data_operations_cuda.py +31 -1
- pyerualjetwork/model_operations.py +171 -101
- pyerualjetwork/model_operations_cuda.py +193 -112
- pyerualjetwork/planeat.py +173 -34
- pyerualjetwork/planeat_cuda.py +179 -39
- {pyerualjetwork-4.5.2b0.dist-info → pyerualjetwork-4.6b0.dist-info}/METADATA +1 -1
- {pyerualjetwork-4.5.2b0.dist-info → pyerualjetwork-4.6b0.dist-info}/RECORD +11 -11
- {pyerualjetwork-4.5.2b0.dist-info → pyerualjetwork-4.6b0.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.5.2b0.dist-info → pyerualjetwork-4.6b0.dist-info}/top_level.txt +0 -0
pyerualjetwork/planeat.py
CHANGED
@@ -17,11 +17,11 @@ import random
 import math
 
 ### LIBRARY IMPORTS ###
-from .data_operations import normalization, non_neg_normalization
+from .data_operations import normalization, non_neg_normalization, split_nested_arrays, reconstruct_nested_arrays
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions import apply_activation, all_activations
 
-def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
+def define_genomes(input_shape, output_shape, population_size, hidden=0, neurons=None, activation_functions=None, dtype=np.float32):
     """
     Initializes a population of genomes, where each genome is represented by a set of weights
     and an associated activation function. Each genome is created with random weights and activation
@@ -35,6 +35,12 @@ def define_genomes(input_shape, output_shape, population_size, dtype=np.float32)
 
     population_size (int): The number of genomes (individuals) in the population.
 
+    hidden (int, optional): If you dont want train PLAN model this parameter represents a hidden layer count for MLP model. Default: 0 (PLAN)
+
+    neurons (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Default: None (PLAN)
+
+    activation_functions (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. Default: None (PLAN) NOTE: THIS EFFECTS HIDDEN LAYERS OUTPUT. NOT OUTPUT LAYER!
+
     dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.
 
     Returns:
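The hunk above turns `define_genomes` into a dual-purpose initializer: with the defaults it still builds single-matrix PLAN genomes, while a positive `hidden` count (plus matching `neurons` and `activation_functions` lists) builds multi-layer MLP genomes. A minimal usage sketch; the sizes and activation names ('relu', 'tanh') are illustrative and assume those names exist in `all_activations()`:

```python
import numpy as np
from pyerualjetwork.planeat import define_genomes

# PLAN genomes (pre-4.6 behaviour, unchanged): one (output_shape, input_shape) matrix per genome.
plan_W, plan_acts = define_genomes(input_shape=64, output_shape=10, population_size=50)

# MLP genomes (new in 4.6b0): hidden + 1 weight matrices per genome.
# The activation names are illustrative; they must be valid entries of all_activations().
mlp_W, mlp_acts = define_genomes(input_shape=64, output_shape=10, population_size=50,
                                 hidden=2, neurons=[32, 16],
                                 activation_functions=['relu', 'tanh'],
                                 dtype=np.float32)
```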
@@ -42,36 +48,68 @@ def define_genomes(input_shape, output_shape, population_size, dtype=np.float32)
     - population_weights (numpy.ndarray): A 2D numpy array of shape (population_size, output_shape, input_shape) representing the
       weight matrices for each genome.
     - population_activations (list): A list of activation functions applied to each genome.
-
+
     Notes:
     The weights are initialized randomly within the range [-1, 1].
     Activation functions are selected randomly from a predefined list `all_activations()`.
     The weights for each genome are then modified by applying the corresponding activation function
     and normalized using the `normalization()` function. (Max abs normalization.)
     """
-
-
+    if hidden > 0:
+        population_weights = [[0] * (hidden + 1) for _ in range(population_size)]
+        population_activations = [[0] * (hidden) for _ in range(population_size)]
+
+        if len(neurons) != hidden:
+            raise ValueError('hidden parameter and neurons list length must be equal.')
+
+
+        for i in range(len(population_weights)):
+
+            for l in range(hidden + 1):
+
+                if l == 0:
+                    population_weights[i][l] = np.random.uniform(-1, 1, (neurons[l], input_shape)).astype(dtype)
 
-
-
+                elif l == hidden:
+                    population_weights[i][l] = np.random.uniform(-1, 1, (output_shape, neurons[l-1])).astype(dtype)
+
+                else:
+                    population_weights[i][l] = np.random.uniform(-1, 1, (neurons[l], neurons[l-1])).astype(dtype)
+
+                if l != hidden:
+                    population_activations[i][l] = activation_functions[l]
+
+                # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):
+
+                for j in range(population_weights[i][l].shape[0]):
+
+                    population_weights[i][l][j,:] = apply_activation(population_weights[i][l][j,:], population_activations[i])
+                    population_weights[i][l][j,:] = normalization(population_weights[i][l][j,:], dtype=dtype)
+
+    else:
+        population_weights = [0] * population_size
+        population_activations = [0] * population_size
+
+        except_this = ['spiral', 'circular']
+        activations = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
 
-
+        for i in range(len(population_weights)):
 
-
-
+            population_weights[i] = np.random.uniform(-1, 1, (output_shape, input_shape)).astype(dtype)
+            population_activations[i] = activations[int(random.uniform(0, len(activations)-1))]
 
-
+            # ACTIVATIONS APPLYING IN WEIGHTS SPECIFIC OUTPUT CONNECTIONS (MORE PLAN LIKE FEATURES(FOR NON-LINEARITY)):
 
-
+            for j in range(population_weights[i].shape[0]):
 
-
-
+                population_weights[i][j,:] = apply_activation(population_weights[i][j,:], population_activations[i])
+                population_weights[i][j,:] = normalization(population_weights[i][j,:], dtype=dtype)
 
-    return
+    return population_weights, population_activations
 
 
 def evolver(weights,
-            activation_potentiations,
+            activation_potentiations,
             what_gen,
             fitness,
             weight_evolve=True,
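For an MLP genome the new initialization loop produces `hidden + 1` matrices: the first maps the input to the first hidden layer, the middle ones connect consecutive hidden layers, and the last maps the final hidden layer to the output. A quick shape check, reusing the illustrative `mlp_W` population from the earlier sketch:

```python
# Expected per-layer shapes for hidden=2, neurons=[32, 16], input_shape=64, output_shape=10,
# mirroring the three branches of the loop above.
expected = [(32, 64),   # l == 0:        (neurons[0], input_shape)
            (16, 32),   # 0 < l < hidden: (neurons[l], neurons[l-1])
            (10, 16)]   # l == hidden:   (output_shape, neurons[-1])

for layer_W, shape in zip(mlp_W[0], expected):
    assert layer_W.shape == shape
```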
@@ -88,11 +126,12 @@ def evolver(weights,
             activation_mutate_change_prob=0.5,
             activation_selection_add_prob=0.5,
             activation_selection_change_prob=0.5,
-            activation_selection_threshold=
+            activation_selection_threshold=20,
             activation_mutate_prob=1,
-            activation_mutate_threshold=
+            activation_mutate_threshold=20,
             weight_mutate_threshold=16,
             weight_mutate_prob=1,
+            is_mlp=False,
             dtype=np.float32):
     """
     Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
@@ -105,8 +144,8 @@ def evolver(weights,
     weights (numpy.ndarray): Array of weights for each genome.
         (first returned value of define_genomes function)
 
-    activation_potentiations (list): A list of activation functions for each genome.
-        (second returned value of define_genomes function)
+    activation_potentiations (list[str]): A list of activation functions for each genome.
+        (second returned value of define_genomes function) NOTE!: 'activation potentiations' for PLAN 'activation functions' for MLP.
 
     what_gen (int): The current generation number, used for informational purposes or logging.
 
@@ -175,10 +214,12 @@ def evolver(weights,
     activation_selection_change_prob (float, optional): The probability of changing an activation function in the genome for crossover.
         Must be in the range [0, 1]. Default is 0.5.
 
-    activation_mutate_threshold (int, optional): Determines max how much activation mutaiton operation applying. (Function automaticly determines to min) Default:
+    activation_mutate_threshold (int, optional): Determines max how much activation mutaiton operation applying. (Function automaticly determines to min) Default: 20
 
-    activation_selection_threshold (int, optional): Determines max how much activaton transferable to child from undominant parent. (Function automaticly determines to min) Default:
+    activation_selection_threshold (int, optional): Determines max how much activaton transferable to child from undominant parent. (Function automaticly determines to min) Default: 20
 
+    is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)
+
     dtype (numpy.dtype, optional): Data type for the arrays. Default: np.float32.
         Example: np.float64 or np.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].
 
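With the new `is_mlp` flag, the same `evolver` entry point accepts the nested MLP genomes produced by `define_genomes(..., hidden=...)`. A hedged usage sketch, reusing the illustrative `mlp_W`/`mlp_acts` population from above and a made-up fitness vector:

```python
import numpy as np
from pyerualjetwork.planeat import evolver

# Hypothetical fitness values: one score per genome, higher is better.
fitness = np.random.rand(50)

mlp_W, mlp_acts = evolver(mlp_W, mlp_acts,
                          what_gen=1,
                          fitness=fitness,
                          is_mlp=True,   # new in 4.6b0: routes the call to mlp_evolver
                          dtype=np.float32)
```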
@@ -266,6 +307,34 @@ def evolver(weights,
 
     if weight_evolve is False: origin_weights = np.copy(weights)
 
+    if is_mlp: ### IF EACH GENOME HAVE MORE THEN 1 WEIGHT MATRIX IT IS NOT PLAN MODEL. IT IS MLP MODEL.
+
+        activations = activation_potentiations.copy()
+
+        return mlp_evolver(weights,
+                           activations,
+                           what_gen,
+                           dtype,
+                           fitness,
+                           policy,
+                           bad_genomes_selection_prob,
+                           bar_status,
+                           strategy,
+                           bad_genomes_mutation_prob,
+                           fitness_bias,
+                           cross_over_mode,
+                           activation_mutate_change_prob,
+                           activation_selection_change_prob,
+                           activation_selection_threshold,
+                           activation_mutate_prob,
+                           activation_mutate_threshold,
+                           weight_mutate_threshold,
+                           show_info,
+                           weight_mutate_prob,
+                           )
+
+    if isinstance(weights, list): weights = np.array(weights, dtype=dtype)
+
     ### FITNESS IS SORTED IN ASCENDING ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:
 
     sort_indices = np.argsort(fitness)
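The dispatch above is driven purely by the flag, but the underlying structural difference is that a PLAN genome carries a single 2D weight matrix while an MLP genome carries a list of them (the comment in the hunk makes the same point). A small illustration of the two containers, using the shapes from the earlier `define_genomes` sketches:

```python
# PLAN population: one (output_shape, input_shape) matrix per genome.
#   plan_W[i] -> ndarray of shape (10, 64)
# MLP population: hidden + 1 matrices per genome.
#   mlp_W[i]  -> [ndarray (32, 64), ndarray (16, 32), ndarray (10, 16)]
assert plan_W[0].ndim == 2
assert isinstance(mlp_W[0], list) and len(mlp_W[0]) == 3
```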
@@ -408,44 +477,114 @@ def evolver(weights,
     return weights, activation_potentiations
 
 
-def evaluate(
+def evaluate(Input, weights, activation_potentiations, is_mlp=False):
     """
     Evaluates the performance of a population of genomes, applying different activation functions
     and weights depending on whether reinforcement learning mode is enabled or not.
 
     Args:
-
-        a genome (A list of input features for each genome, or a single set of input features for one genome
+        Input (list or numpy.ndarray): A list or 2D numpy array where each element represents
+            a genome (A list of input features for each genome, or a single set of input features for one genome).
         weights (list or numpy.ndarray): A list or 2D numpy array of weights corresponding to each genome
             in `x_population`. This determines the strength of connections.
         activation_potentiations (list or str): A list where each entry represents an activation function
            or a potentiation strategy applied to each genome. If only one
            activation function is used, this can be a single string.
+        is_mlp (bool, optional): Evaluate PLAN model or MLP model ? Default: False (PLAN)
+
     Returns:
         list: A list of outputs corresponding to each genome in the population after applying the respective
               activation function and weights.
 
     Example:
         ```python
-        outputs = evaluate(
+        outputs = evaluate(Input, weights, activation_potentiations)
         ```
 
        - The function returns a list of outputs after processing the population, where each element corresponds to
-         the output for each genome in
-    """
+         the output for each genome in population.
+    """
     ### THE OUTPUTS ARE RETURNED, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:
-
+
 
     if isinstance(activation_potentiations, str):
         activation_potentiations = [activation_potentiations]
     else:
         activation_potentiations = [item if isinstance(item, list) else [item] for item in activation_potentiations]
 
-
-
+    if is_mlp:
+        layer = Input
+        for i in range(len(weights)):
+            if i != len(weights) - 1: layer = apply_activation(layer, activation_potentiations[i])
+            layer = layer @ weights[i].T
+
+        return layer
 
-
+    else:
 
+        Input = apply_activation(Input, activation_potentiations)
+        result = Input @ weights.T
+
+        return result
+
|
+
def mlp_evolver(weights,
|
531
|
+
activations,
|
532
|
+
generation,
|
533
|
+
dtype,
|
534
|
+
fitness,
|
535
|
+
policy,
|
536
|
+
bad_genomes_selection_prob,
|
537
|
+
bar_status,
|
538
|
+
strategy,
|
539
|
+
bad_genomes_mutation_prob,
|
540
|
+
fitness_bias,
|
541
|
+
cross_over_mode,
|
542
|
+
activation_mutate_change_prob,
|
543
|
+
activation_selection_change_prob,
|
544
|
+
activation_selection_threshold,
|
545
|
+
activation_mutate_prob,
|
546
|
+
activation_mutate_threshold,
|
547
|
+
weight_mutate_threshold,
|
548
|
+
show_info,
|
549
|
+
weight_mutate_prob,
|
550
|
+
):
|
551
|
+
|
552
|
+
weights = split_nested_arrays(weights)
|
553
|
+
|
554
|
+
for layer in range(len(weights)):
|
555
|
+
if show_info == True:
|
556
|
+
if layer == len(weights) - 1:
|
557
|
+
show = True
|
558
|
+
else:
|
559
|
+
show = False
|
560
|
+
else:
|
561
|
+
show = False
|
562
|
+
|
563
|
+
weights[layer], activations = evolver(weights[layer], activations,
|
564
|
+
fitness=fitness,
|
565
|
+
what_gen=generation,
|
566
|
+
show_info=show,
|
567
|
+
policy=policy,
|
568
|
+
bad_genomes_selection_prob=bad_genomes_selection_prob,
|
569
|
+
bar_status=bar_status,
|
570
|
+
strategy=strategy,
|
571
|
+
bad_genomes_mutation_prob=bad_genomes_mutation_prob,
|
572
|
+
fitness_bias=fitness_bias,
|
573
|
+
cross_over_mode=cross_over_mode,
|
574
|
+
activation_mutate_add_prob=0,
|
575
|
+
activation_mutate_delete_prob=0,
|
576
|
+
activation_mutate_change_prob=activation_mutate_change_prob,
|
577
|
+
activation_selection_add_prob=0,
|
578
|
+
activation_selection_change_prob=activation_selection_change_prob,
|
579
|
+
activation_selection_threshold=activation_selection_threshold,
|
580
|
+
activation_mutate_prob=activation_mutate_prob,
|
581
|
+
activation_mutate_threshold=activation_mutate_threshold,
|
582
|
+
weight_mutate_threshold=weight_mutate_threshold,
|
583
|
+
weight_mutate_prob=weight_mutate_prob,
|
584
|
+
dtype=dtype
|
585
|
+
)
|
586
|
+
|
587
|
+
return reconstruct_nested_arrays(weights), activations
|
449
588
|
|
450
589
|
def cross_over(first_parent_W,
|
451
590
|
second_parent_W,
|
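`mlp_evolver` evolves an MLP population one layer at a time by reusing the existing PLAN `evolver` on each layer's matrices across all genomes; `split_nested_arrays` and `reconstruct_nested_arrays` come from `data_operations.py` (also changed in this release) and are not shown in this diff. The following stand-ins are purely an assumption inferred from how `mlp_evolver` uses them: a genome-major to layer-major transpose and its inverse.

```python
import numpy as np

# Hypothetical sketches only; the real implementations live in pyerualjetwork/data_operations.py.
def split_nested_arrays_sketch(population):
    """Genome-major [[W0, W1, ...], ...] -> layer-major [stacked W0 of all genomes, ...]."""
    n_layers = len(population[0])
    return [np.stack([genome[l] for genome in population]) for l in range(n_layers)]

def reconstruct_nested_arrays_sketch(layer_major):
    """Inverse: layer-major stacks -> genome-major nested lists."""
    n_genomes = len(layer_major[0])
    return [[layer_major[l][g] for l in range(len(layer_major))] for g in range(n_genomes)]
```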
@@ -748,7 +887,7 @@ def mutation(weight,
 
     if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:
 
-        random_index = random.randint(0, len(activations)
+        random_index = random.randint(0, len(activations)-1)
         activations.pop(random_index)
 
 
@@ -785,7 +924,7 @@ def mutation(weight,
 def second_parent_selection(good_weights, bad_weights, good_activations, bad_activations, bad_genomes_selection_prob):
 
     selection_prob = random.uniform(0, 1)
-    random_index = int(random.uniform(0, len(good_weights)
+    random_index = int(random.uniform(0, len(good_weights)-1))
 
     if selection_prob > bad_genomes_selection_prob:
         second_selected_W = good_weights[random_index]