pyerualjetwork 4.3.9b5__py3-none-any.whl → 4.3.9b6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/plan.py +16 -13
- pyerualjetwork/plan_cuda.py +13 -10
- pyerualjetwork/planeat.py +132 -52
- pyerualjetwork/planeat_cuda.py +91 -42
- {pyerualjetwork-4.3.9b5.dist-info → pyerualjetwork-4.3.9b6.dist-info}/METADATA +1 -1
- {pyerualjetwork-4.3.9b5.dist-info → pyerualjetwork-4.3.9b6.dist-info}/RECORD +9 -9
- {pyerualjetwork-4.3.9b5.dist-info → pyerualjetwork-4.3.9b6.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.3.9b5.dist-info → pyerualjetwork-4.3.9b6.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -1,4 +1,4 @@
-__version__ = "4.3.9b5"
+__version__ = "4.3.9b6"
 __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
 def print_version(__version__):
pyerualjetwork/plan.py
CHANGED
@@ -83,7 +83,7 @@ def fit(
     return normalization(weight, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
+def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
             neural_web_history=False, show_current_activations=False, auto_normalization=False,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None,
@@ -120,7 +120,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                  interval=16.67)
     ```
 
-    fit_start (bool): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False.
+    fit_start (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True
 
     strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.
 
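Taken together, the two hunks above make `fit_start` an optional keyword defaulting to `True` instead of a required positional argument. A small sketch to confirm the new default on an installed build; it relies only on the signature shown in this diff:

```python
# Sketch: check that fit_start is now a defaulted keyword rather than a required positional.
import inspect
from pyerualjetwork import plan

print(inspect.signature(plan.learner).parameters['fit_start'].default)  # expected: True in 4.3.9b6
```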
@@ -202,15 +202,15 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     if fit_start is False or pop_size > activation_potentiation_len:
         weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
-
-        if start_this_act is not None and start_this_W is not None:
-            weight_pop[0] = start_this_W
-            act_pop[0] = start_this_act
 
     else:
         weight_pop = [0] * pop_size
         act_pop = [0] * pop_size
 
+    if start_this_act is not None and start_this_W is not None:
+        weight_pop[0] = start_this_W
+        act_pop[0] = start_this_act
+
     for i in range(gen):
         postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
         progress.set_postfix(postfix_dict)
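The `start_this_W` / `start_this_act` seeding block now runs after both branches, so a caller-supplied starting genome lands in slot 0 whether the population came from `define_genomes` or from the pre-sized lists. A standalone sketch of that ordering (placeholder values, not library code):

```python
# Sketch of the reordered seeding: slot 0 is overwritten with the supplied genome
# regardless of how the initial population lists were built.
def seed_population(weight_pop, act_pop, start_this_W=None, start_this_act=None):
    if start_this_act is not None and start_this_W is not None:
        weight_pop[0] = start_this_W
        act_pop[0] = start_this_act
    return weight_pop, act_pop

weight_pop, act_pop = seed_population([0] * 4, [0] * 4, start_this_W="W_seed", start_this_act="tanh")
print(weight_pop[0], act_pop[0])  # W_seed tanh
```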
@@ -224,9 +224,12 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
             x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
 
             if fit_start is True and i == 0 and j < activation_potentiation_len:
-
-
-
+                if start_this_act is not None and j == 0:
+                    pass
+                else:
+                    act_pop[j] = activation_potentiation[j]
+                    W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
+                    weight_pop[j] = W
 
             model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
             acc = model[get_acc()]
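The new `j == 0` guard keeps a seeded genome from being overwritten during the first generation's PLAN warm-start: slot 0 retains `start_this_act` / `start_this_W`, while the remaining slots are filled from `activation_potentiation`. A minimal sketch of just that control flow (toy values, not library code):

```python
# Sketch: slot 0 keeps the seeded activation; the other slots get the warm-start ones.
activation_potentiation = ['sigmoid', 'relu', 'tanh']
act_pop = ['seeded_act', 0, 0]
start_this_act = 'seeded_act'

for j in range(len(act_pop)):
    if start_this_act is not None and j == 0:
        pass                                 # leave the seeded genome untouched
    else:
        act_pop[j] = activation_potentiation[j]

print(act_pop)  # ['seeded_act', 'relu', 'tanh']
```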
@@ -309,7 +312,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if target_acc is not None and best_acc >= target_acc:
             progress.close()
             train_model = evaluate(x_train, y_train, W=best_weights,
-                                   activation_potentiation=final_activations
+                                   activation_potentiation=final_activations)
 
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -331,7 +334,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if target_loss is not None and best_loss <= target_loss:
             progress.close()
             train_model = evaluate(x_train, y_train, W=best_weights,
-                                   activation_potentiation=final_activations
+                                   activation_potentiation=final_activations)
 
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -380,7 +383,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
                 train_model = evaluate(x_train, y_train, W=best_weights,
-                                       activation_potentiation=final_activations
+                                       activation_potentiation=final_activations)
 
                 if loss == 'categorical_crossentropy':
                     train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -401,7 +404,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
     # Final evaluation
     progress.close()
     train_model = evaluate(x_train, y_train, W=best_weights,
-                           activation_potentiation=final_activations
+                           activation_potentiation=final_activations)
 
     if loss == 'categorical_crossentropy':
         train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
pyerualjetwork/plan_cuda.py
CHANGED
@@ -82,7 +82,7 @@ def fit(
     return normalization(weight, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
+def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
             neural_web_history=False, show_current_activations=False, auto_normalization=False,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None,
@@ -118,7 +118,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                  batch_size=0.05,
                  interval=16.67)
     ```
-    fit_start (bool): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False.
+    fit_start (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True
 
     strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.
 
@@ -210,15 +210,15 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     if fit_start is False or pop_size > activation_potentiation_len:
         weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
-
-        if start_this_act is not None and start_this_W is not None:
-            weight_pop[0] = start_this_W
-            act_pop[0] = start_this_act
 
     else:
         weight_pop = [0] * pop_size
         act_pop = [0] * pop_size
 
+    if start_this_act is not None and start_this_W is not None:
+        weight_pop[0] = start_this_W
+        act_pop[0] = start_this_act
+
     for i in range(gen):
         postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
         progress.set_postfix(postfix_dict)
@@ -235,9 +235,12 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
             y_train_batch = cp.array(y_train_batch, dtype=dtype, copy=False)
 
             if fit_start is True and i == 0 and j < activation_potentiation_len:
-
-
-
+                if start_this_act is not None and j == 0:
+                    pass
+                else:
+                    act_pop[j] = activation_potentiation[j]
+                    W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
+                    weight_pop[j] = W
 
             model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
             acc = model[get_acc()]
@@ -319,7 +322,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         # Check target accuracy
         if target_acc is not None and best_acc >= target_acc:
             progress.close()
-            train_model = evaluate(x_train, y_train, W=best_weights,
+            train_model = evaluate(x_train, y_train, W=best_weights,
                                    activation_potentiation=final_activations)
 
             if loss == 'categorical_crossentropy':
pyerualjetwork/planeat.py
CHANGED
@@ -17,9 +17,10 @@ import random
 import math
 
 ### LIBRARY IMPORTS ###
+from .plan import feed_forward
 from .data_operations import normalization
 from .ui import loading_bars, initialize_loading_bar
-from
+from. activation_functions import apply_activation, all_activations
 
 def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
     """
@@ -296,7 +297,6 @@ def evolver(weights,
     mutated_W = np.copy(bad_weights)
     mutated_act = bad_activations.copy()
 
-
     for i in range(len(bad_weights)):
 
         if policy == 'aggressive':
@@ -399,7 +399,7 @@ def evolver(weights,
     return weights, activation_potentiations
 
 
-def evaluate(x_population, weights, activation_potentiations):
+def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dtype=np.float32):
     """
     Evaluates the performance of a population of genomes, applying different activation functions
     and weights depending on whether reinforcement learning mode is enabled or not.
@@ -412,30 +412,62 @@ def evaluate(x_population, weights, activation_potentiations):
        activation_potentiations (list or str): A list where each entry represents an activation function
                                                or a potentiation strategy applied to each genome. If only one
                                                activation function is used, this can be a single string.
+       rl_mode (bool, optional): If True, reinforcement learning mode is activated, this accepts x_population is a single genome. (Also weights and activation_potentations a single genomes part.)
+                                 Default is False.
+
+       dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+
     Returns:
        list: A list of outputs corresponding to each genome in the population after applying the respective
              activation function and weights.
 
+    Notes:
+       - If `rl_mode` is True:
+           - Accepts x_population is a single genom
+           - The inputs are flattened, and the activation function is applied across the single genom.
+
+       - If `rl_mode` is False:
+           - Accepts x_population is a list of genomes
+           - Each genome is processed individually, and the results are stored in the `outputs` list.
+
+       - `feed_forward()` function is the core function that processes the input with the given weights and activation function.
+
     Example:
        ```python
-       outputs = evaluate(x_population, weights, activation_potentiations)
+       outputs = evaluate(x_population, weights, activation_potentiations, rl_mode=False)
        ```
 
    - The function returns a list of outputs after processing the population, where each element corresponds to
      the output for each genome in `x_population`.
-    """
+    """
+
+    ### IF RL_MODE IS TRUE, A SINGLE GENOME IS ASSUMED AS INPUT, A FEEDFORWARD PREDICTION IS MADE, AND THE OUTPUT(NPARRAY) IS RETURNED:
+
+    ### IF RL_MODE IS FALSE, PREDICTIONS ARE MADE FOR ALL GENOMES IN THE GROUP USING THEIR CORRESPONDING INDEXED INPUTS AND DATA.
    ### THE OUTPUTS ARE RETURNED AS A PYTHON LIST, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:
 
-
-
-
+    if rl_mode == True:
+        Input = np.array(x_population, copy=False, dtype=dtype)
+        Input = Input.ravel()
+
+        if isinstance(activation_potentiations, str):
+            activation_potentiations = [activation_potentiations]
+
+        outputs = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations, w=weights)
+
    else:
-
+        outputs = [0] * len(x_population)
+        for i, genome in enumerate(x_population):
 
-
-
+            Input = np.array(genome, copy=False)
+            Input = Input.ravel()
 
-
+            if isinstance(activation_potentiations[i], str):
+                activation_potentiations[i] = [activation_potentiations[i]]
+
+            outputs[i] = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations[i], w=weights[i])
+
+    return outputs
 
 
 def cross_over(first_parent_W,
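A minimal usage sketch of the extended `evaluate` signature. The shapes (4 inputs, 2 outputs, 3 genomes) are illustrative assumptions; only the call patterns and the `define_genomes` signature come from this diff:

```python
import numpy as np
from pyerualjetwork import planeat

# Build a tiny population with the library's own generator (signature shown earlier in this diff).
weights, activations = planeat.define_genomes(input_shape=4, output_shape=2, population_size=3)

x_population = [np.random.rand(4).astype(np.float32) for _ in range(3)]  # one input vector per genome

# Population mode (rl_mode=False, the default): a list with one output per genome.
outputs = planeat.evaluate(x_population, weights, activations)

# RL mode: a single observation against a single genome's weights and activations.
single_output = planeat.evaluate(x_population[0], weights[0], activations[0], rl_mode=True)
```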
@@ -582,11 +614,20 @@ def cross_over(first_parent_W,
     if potential_activation_selection_add > activation_selection_add_prob:
 
         threshold = abs(activation_selection_threshold / succes)
-
+        new_threshold = threshold
+
+        while True:
+
+            random_index = int(random.uniform(0, len(undominant_parent_act)-1))
+            random_undominant_activation = undominant_parent_act[random_index]
 
-
+            child_act.append(random_undominant_activation)
 
-
+            if len(dominant_parent_act) > new_threshold:
+                new_threshold += threshold
+
+            else:
+                break
 
     activation_selection_change_prob = 1 - activation_selection_change_prob
     potential_activation_selection_change_prob = random.uniform(0, 1)
@@ -594,12 +635,21 @@ def cross_over(first_parent_W,
     if potential_activation_selection_change_prob > activation_selection_change_prob:
 
         threshold = abs(activation_selection_threshold / succes)
-
-
-
+        new_threshold = threshold
+
+        while True:
+
+            random_index_undominant = int(random.uniform(0, len(undominant_parent_act)-1))
+            random_index_dominant = int(random.uniform(0, len(dominant_parent_act)-1))
+            random_undominant_activation = undominant_parent_act[random_index_undominant]
 
-
+            child_act[random_index_dominant] = random_undominant_activation
 
+            if len(dominant_parent_act) > new_threshold:
+                new_threshold += threshold
+
+            else:
+                break
 
 
     return child_W, child_act
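To make the new selection loops concrete: each pass picks one activation from the undominant parent, and the loop ends once `new_threshold` (stepped by `threshold = abs(activation_selection_threshold / succes)`) outgrows the dominant parent's activation count. A standalone sketch of just that termination logic, with arbitrary example values (not library code):

```python
# Sketch of the threshold-stepping loop added to cross_over: the step size
# controls how many activations are appended or swapped before the loop exits.
def count_picks(dominant_len, activation_selection_threshold, succes):
    threshold = abs(activation_selection_threshold / succes)
    new_threshold = threshold
    picks = 0
    while True:
        picks += 1                          # one activation appended / swapped per pass
        if dominant_len > new_threshold:
            new_threshold += threshold
        else:
            break
    return picks

print(count_picks(dominant_len=6, activation_selection_threshold=2, succes=0.5))  # step of 4 against length 6 -> 2 picks
```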
@@ -673,50 +723,80 @@ def mutation(weight,
     max_threshold = row_end * col_end
 
     threshold = weight_mutate_threshold * genome_fitness
-
-
-    selected_rows = np.random.randint(start, row_end, size=num_mutations)
-    selected_cols = np.random.randint(start, col_end, size=num_mutations)
-    new_values = np.random.uniform(-1, 1, size=num_mutations)
+    new_threshold = threshold
 
-
+    for _ in range(max_threshold):
+
+        selected_row = int(random.uniform(start, row_end))
+        selected_col = int(random.uniform(start, col_end))
 
-
-
+        weight[selected_row, selected_col] = random.uniform(-1, 1)
+        new_threshold += threshold
 
-
+        if max_threshold > new_threshold:
+            pass
 
-
-
-    max_threshold = len(activations)
+        else:
+            break
 
-
+    activation_mutate_prob = 1 - activation_mutate_prob
+    potential_activation_mutation = random.uniform(0, 1)
 
-
-
-
-
-
-    activation_change_prob = 1 - activation_change_prob
+    if potential_activation_mutation > activation_mutate_prob:
+
+        genome_fitness += epsilon
+        threshold = abs(activation_mutate_threshold / genome_fitness)
+        max_threshold = len(activations)
 
-
-    new_threshold = threshold
+        new_threshold = threshold
 
-
-
-
-
-
+        except_this = ['spiral', 'circular']
+        all_acts = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
+
+        activation_add_prob = 1 - activation_add_prob
+        activation_delete_prob = 1 - activation_delete_prob
+        activation_change_prob = 1 - activation_change_prob
 
-
-    activations.append(random.choice(all_acts))
+        for _ in range(max_threshold):
 
-
-
+            potential_activation_add_prob = random.uniform(0, 1)
+            potential_activation_delete_prob = random.uniform(0, 1)
+            potential_activation_change_prob = random.uniform(0, 1)
 
-
-
-
+
+            if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:
+
+                random_index = random.randint(0, len(activations) - 1)
+                activations.pop(random_index)
+
+
+            if potential_activation_add_prob > activation_add_prob:
+
+                try:
+
+                    random_index_all_act = int(random.uniform(0, len(all_acts)-1))
+                    activations.append(all_acts[random_index_all_act])
+
+                except:
+
+                    activation = activations
+                    activations = []
+
+                    activations.append(activation)
+                    activations.append(all_acts[int(random.uniform(0, len(all_acts)-1))])
+
+
+            if potential_activation_change_prob > activation_change_prob:
+
+                random_index_all_act = int(random.uniform(0, len(all_acts)-1))
+                random_index_genom_act = int(random.uniform(0, len(activations)-1))
+
+                activations[random_index_genom_act] = all_acts[random_index_all_act]
+
+            new_threshold += threshold
+
+            if max_threshold > new_threshold: pass
+            else: break
 
     return weight, activations
 
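The weight-mutation rework above replaces the one-shot `np.random.randint` batch update with a bounded per-cell loop; the number of mutated positions is now governed by `threshold = weight_mutate_threshold * genome_fitness` against `max_threshold = rows * cols`. A standalone sketch of that loop on a toy matrix (not library code; the `start` argument is assumed to be 0 here):

```python
# Sketch of the new weight-mutation loop: each pass overwrites one random cell, and
# the pass count is bounded by how fast new_threshold reaches max_threshold.
import random
import numpy as np

def sketch_weight_mutation(weight, weight_mutate_threshold, genome_fitness, start=0):
    row_end, col_end = weight.shape
    max_threshold = row_end * col_end
    threshold = weight_mutate_threshold * genome_fitness
    new_threshold = threshold

    for _ in range(max_threshold):
        selected_row = int(random.uniform(start, row_end))
        selected_col = int(random.uniform(start, col_end))
        weight[selected_row, selected_col] = random.uniform(-1, 1)
        new_threshold += threshold
        if max_threshold > new_threshold:
            pass
        else:
            break
    return weight

w = sketch_weight_mutation(np.zeros((4, 5)), weight_mutate_threshold=2, genome_fitness=1.0)
# threshold = 2 against max_threshold = 20 -> 9 mutation passes (random cells may repeat).
```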
pyerualjetwork/planeat_cuda.py
CHANGED
@@ -583,11 +583,21 @@ def cross_over(first_parent_W,
     if potential_activation_selection_add > activation_selection_add_prob:
 
         threshold = abs(activation_selection_threshold / succes)
-
+        new_threshold = threshold
+
+        while True:
+
+            random_index = int(random.uniform(0, len(undominant_parent_act)-1))
+            random_undominant_activation = undominant_parent_act[random_index]
 
-
+            child_act.append(random_undominant_activation)
+            new_threshold += threshold
 
-
+            if len(dominant_parent_act) > new_threshold:
+                pass
+
+            else:
+                break
 
     activation_selection_change_prob = 1 - activation_selection_change_prob
     potential_activation_selection_change_prob = random.uniform(0, 1)
@@ -595,12 +605,22 @@ def cross_over(first_parent_W,
     if potential_activation_selection_change_prob > activation_selection_change_prob:
 
         threshold = abs(activation_selection_threshold / succes)
-
-
-
+        new_threshold = threshold
+
+        while True:
+
+            random_index_undominant = int(random.uniform(0, len(undominant_parent_act)-1))
+            random_index_dominant = int(random.uniform(0, len(dominant_parent_act)-1))
+            random_undominant_activation = undominant_parent_act[random_index_undominant]
 
-
+            child_act[random_index_dominant] = random_undominant_activation
+            new_threshold += threshold
 
+            if len(dominant_parent_act) > new_threshold:
+                pass
+
+            else:
+                break
 
 
     return child_W, child_act
@@ -675,50 +695,79 @@ def mutation(weight,
     max_threshold = row_end * col_end
 
     threshold = weight_mutate_threshold * genome_fitness
-
+    new_threshold = threshold
 
-
-
-
-
-    weight[selected_rows, selected_cols] = new_values
+    for _ in range(max_threshold):
+
+        selected_row = int(random.uniform(start, row_end))
+        selected_col = int(random.uniform(start, col_end))
 
-
-
+        weight[selected_row, selected_col] = random.uniform(-1, 1)
+        new_threshold += threshold
 
-
-
-
-
-
+        if max_threshold > new_threshold:
+            pass
+
+        else:
+            break
 
-
-
-    except_this = ['spiral', 'circular']
-    all_acts = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
-
-    activation_add_prob = 1 - activation_add_prob
-    activation_delete_prob = 1 - activation_delete_prob
-    activation_change_prob = 1 - activation_change_prob
+    activation_mutate_prob = 1 - activation_mutate_prob
+    potential_activation_mutation = random.uniform(0, 1)
 
-
-
+    if potential_activation_mutation > activation_mutate_prob:
+
+        genome_fitness += epsilon
+        threshold = abs(activation_mutate_threshold / genome_fitness)
+        max_threshold = len(activations)
 
-
-
-
-
-
+        new_threshold = threshold
+
+        except_this = ['spiral', 'circular']
+        all_acts = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
+
+        activation_add_prob = 1 - activation_add_prob
+        activation_delete_prob = 1 - activation_delete_prob
+        activation_change_prob = 1 - activation_change_prob
 
-
-    activations.append(random.choice(all_acts))
+        for _ in range(max_threshold):
 
-
-
+            potential_activation_add_prob = random.uniform(0, 1)
+            potential_activation_delete_prob = random.uniform(0, 1)
+            potential_activation_change_prob = random.uniform(0, 1)
 
-
-
-
+
+            if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:
+
+                random_index = random.randint(0, len(activations) - 1)
+                activations.pop(random_index)
+
+
+            if potential_activation_add_prob > activation_add_prob:
+
+                try:
+
+                    random_index_all_act = int(random.uniform(0, len(all_acts)-1))
+                    activations.append(all_acts[random_index_all_act])
+
+                except:
+
+                    activation = activations
+                    activations = []
+
+                    activations.append(activation)
+                    activations.append(all_acts[int(random.uniform(0, len(all_acts)-1))])
+
+            if potential_activation_change_prob > activation_change_prob:
+
+                random_index_all_act = int(random.uniform(0, len(all_acts)-1))
+                random_index_genom_act = int(random.uniform(0, len(activations)-1))
+
+                activations[random_index_genom_act] = all_acts[random_index_all_act]
+
+            new_threshold += threshold
+
+            if max_threshold > new_threshold: pass
+            else: break
 
     return weight, activations
 
{pyerualjetwork-4.3.9b5.dist-info → pyerualjetwork-4.3.9b6.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.3.9b5
+Version: 4.3.9b6
 Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-4.3.9b5.dist-info → pyerualjetwork-4.3.9b6.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=koxM5MI-uTn5QAa9WgTtktH9ZJMJeFvU-lqLBGDbHt8,641
 pyerualjetwork/activation_functions.py,sha256=bKf00lsuuLJNO-4vVp4OqBi4zJ-qZ8L3v-vl52notkY,7721
 pyerualjetwork/activation_functions_cuda.py,sha256=5y1Ti3GDfDteQDCUmODwe7tAyDAUlDTKmIikChQ8d6g,7772
 pyerualjetwork/data_operations.py,sha256=Flteouu6rfSo2uHMqBHuzO02dXmbNa-I5qWmUpGTZ5Y,14760
@@ -11,14 +11,14 @@ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,607
 pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
 pyerualjetwork/model_operations.py,sha256=MCSCNYiiICRVZITobtS3ZIWmH5Q9gjyELuH32sAdgg4,12649
 pyerualjetwork/model_operations_cuda.py,sha256=NT01BK5nrDYE7H1x3KnSI8gmx0QTGGB0mP_LqEb1uuU,13157
-pyerualjetwork/plan.py,sha256=
-pyerualjetwork/plan_cuda.py,sha256=
-pyerualjetwork/planeat.py,sha256=
-pyerualjetwork/planeat_cuda.py,sha256=
+pyerualjetwork/plan.py,sha256=mcVxwBus_GwfYjWLMwx8KDc25uJT8l773l-mHCfa0Xk,23558
+pyerualjetwork/plan_cuda.py,sha256=8uEyYdQQX122Hcc-XfFoPSiCeLADt-y-cGX3AuRYPt0,24440
+pyerualjetwork/planeat.py,sha256=uRX-hDywGOai6hHhbYrmcRodNZOg4WCQeJWZbdMlZs8,39470
+pyerualjetwork/planeat_cuda.py,sha256=aTdBmhJeIKC58pssODtqVsdOtJP7W6TRmVyGHp7k_CM,37612
 pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
 pyerualjetwork/visualizations.py,sha256=08O5uEewuYiovZRX1uHWEHjn19LcnhndWYvqVN74xs0,28290
 pyerualjetwork/visualizations_cuda.py,sha256=PYRqj4QYUbuYMYcNwO8yaTPB-jK7E6kZHhTrAi0lwPU,28749
-pyerualjetwork-4.3.
-pyerualjetwork-4.3.
-pyerualjetwork-4.3.
-pyerualjetwork-4.3.
+pyerualjetwork-4.3.9b6.dist-info/METADATA,sha256=G5E_5NHaNjMFmJH2KbZ4MK8DFR0Dc6YFDjJhRHVGvBc,7476
+pyerualjetwork-4.3.9b6.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.3.9b6.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.3.9b6.dist-info/RECORD,,

{pyerualjetwork-4.3.9b5.dist-info → pyerualjetwork-4.3.9b6.dist-info}/WHEEL
File without changes

{pyerualjetwork-4.3.9b5.dist-info → pyerualjetwork-4.3.9b6.dist-info}/top_level.txt
File without changes