pyerualjetwork 4.3.12__py3-none-any.whl → 4.3.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "4.3.12"
+ __version__ = "4.3.13"
  __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
  * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
  * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
pyerualjetwork/plan.py CHANGED
@@ -84,7 +84,7 @@ def fit(


  def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
- neural_web_history=False, show_current_activations=False, auto_normalization=False,
+ weight_evolve=False, neural_web_history=False, show_current_activations=False, auto_normalization=False,
  neurons_history=False, early_stop=False, show_history=False, target_loss=None,
  interval=33.33, target_acc=None, loss='categorical_crossentropy', acc_impact=0.9, loss_impact=0.1,
  start_this_act=None, start_this_W=None, dtype=np.float32):
@@ -119,6 +119,9 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  batch_size=0.05,
  interval=16.67)
  ```
+
+ weight_evolve (bool, optional): Activation combinations are already optimized by the PLANEAT genetic search algorithm. Should the weight parameters also evolve, or should the weights be determined according to the aggregating learning principle of the PLAN algorithm? Default: True (evolves weights)
+
  loss (str, optional): options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.

  target_acc (float, optional): The target accuracy to stop training early when achieved. Default is None.
@@ -232,6 +235,9 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
  weight_pop[j] = W

+ if weight_evolve is False:
+ weight_pop[j] = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
+
  model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
  acc = model[get_acc()]

@@ -351,7 +357,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  best_acc_per_gen_list.append(best_acc)
  loss_list.append(best_loss)

- weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
+ weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), weight_evolve=weight_evolve, bar_status=False)
  target_pop = []

  # Early stopping check
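For orientation, a minimal usage sketch of the new flag from the NumPy module follows. It is not taken from the package docs: the toy data, the optimizer lambda name and the hyperparameter values are illustrative assumptions; only the weight_evolve keyword and the learner-to-optimizer call pattern come from the hunks above.

    import numpy as np
    from pyerualjetwork import plan, planeat

    # hypothetical toy dataset with one-hot labels
    x_train = np.random.rand(120, 4).astype(np.float32)
    y_train = np.eye(3)[np.random.randint(0, 3, 120)].astype(np.float32)

    # learner forwards weight_evolve to this callback, as the @@ -351,7 +357,7 @@ hunk shows
    genetic_optimizer = lambda *args, **kwargs: planeat.evolver(*args, **kwargs)

    model = plan.learner(x_train, y_train,
                         optimizer=genetic_optimizer,
                         fit_start=True,
                         gen=5,
                         weight_evolve=False)  # keep PLAN-fitted weights, evolve only activation combinations

With weight_evolve=False the learner re-derives the weights with fit() for each candidate, so only the activation combinations are searched genetically.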
pyerualjetwork/plan_cuda.py CHANGED
@@ -83,7 +83,7 @@ def fit(


  def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
- neural_web_history=False, show_current_activations=False, auto_normalization=False, target_acc=None,
+ weight_evolve=False, neural_web_history=False, show_current_activations=False, auto_normalization=False, target_acc=None,
  neurons_history=False, early_stop=False, show_history=False, loss='categorical_crossentropy',
  interval=33.33, target_loss=None, loss_impact=0.1, acc_impact=0.9,
  start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
@@ -118,6 +118,9 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  batch_size=0.05,
  interval=16.67)
  ```
+
+ weight_evolve (bool, optional): Activation combinations are already optimized by the PLANEAT genetic search algorithm. Should the weight parameters also evolve, or should the weights be determined according to the aggregating learning principle of the PLAN algorithm? Default: True (evolves weights)
+
  loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.

  target_acc (float, optional): The target accuracy to stop training early when achieved. Default is None.
@@ -247,6 +250,9 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
  weight_pop[j] = W

+ if weight_evolve is False:
+ weight_pop[j] = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
+
  model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
  acc = model[get_acc()]

@@ -367,7 +373,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  best_acc_per_gen_list.append(best_acc)
  loss_list.append(best_loss)

- weight_pop, act_pop = optimizer(cp.array(weight_pop, copy=False, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), bar_status=False)
+ weight_pop, act_pop = optimizer(cp.array(weight_pop, copy=False, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), weight_evolve=weight_evolve, bar_status=False)
  target_pop = []

  # Early stopping check
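The CUDA module mirrors this pattern. A hedged counterpart sketch follows; whether plan_cuda.learner accepts host NumPy arrays and moves them itself according to the memory argument is an assumption, not something shown in this diff.

    import numpy as np
    from pyerualjetwork import plan_cuda, planeat_cuda

    x_train = np.random.rand(120, 4).astype(np.float32)
    y_train = np.eye(3)[np.random.randint(0, 3, 120)].astype(np.float32)

    genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolver(*args, **kwargs)

    model = plan_cuda.learner(x_train, y_train,
                              optimizer=genetic_optimizer,
                              gen=5,
                              weight_evolve=False,  # forwarded to planeat_cuda.evolver, per the @@ -367,7 +373,7 @@ hunk
                              memory='gpu')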
pyerualjetwork/planeat.py CHANGED
@@ -73,7 +73,8 @@ def define_genomes(input_shape, output_shape, population_size, dtype=np.float32)
  def evolver(weights,
  activation_potentiations,
  what_gen,
- fitness,
+ fitness,
+ weight_evolve=True,
  show_info=False,
  policy='aggressive',
  bad_genomes_selection_prob=None,
@@ -112,6 +113,8 @@ def evolver(weights,
  fitness (numpy.ndarray): A 1D array containing the fitness values of each genome.
  The array is used to rank the genomes based on their performance. PLANEAT maximizes or minimizes this fitness based on the `target_fitness` parameter.

+ weight_evolve (bool, optional): Should the weights evolve too, or only the activation combinations? Default: True. Note: Regardless of whether this parameter is True or False, you must give the evolver function a list of weights equal to the number of activation potentiations. You can create completely random weights if you want. If this parameter is False, the weights entering the evolver function and the weights it returns will be exactly the same.
+
  show_info (bool, optional): If True, prints information about the current generation and the
  maximum reward obtained. Also shows the current configuration. Default is False.

@@ -261,7 +264,8 @@ def evolver(weights,
  else:
  raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")

-
+ if weight_evolve is False: origin_weights = np.copy(weights)
+
  ### FITNESS IS SORTED IN ASCENDING ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:

  sort_indices = np.argsort(fitness)
@@ -325,6 +329,7 @@ def evolver(weights,
  first_parent_fitness=best_fitness,
  fitness_bias=fitness_bias,
  second_parent_fitness=normalized_fitness[s_i],
+ weight_evolve=weight_evolve,
  epsilon=epsilon
  )

@@ -352,6 +357,7 @@ def evolver(weights,
  weight_mutate_threshold=weight_mutate_threshold,
  genome_fitness=normalized_fitness[fitness_index],
  activation_mutate_threshold=activation_mutate_threshold,
+ weight_evolve=weight_evolve,
  epsilon=epsilon
  )

@@ -397,6 +403,7 @@ def evolver(weights,
  print(" BEST GENOME INDEX: ", str(0))
  print(" NOTE: The returned genome at the first index is the best of the previous generation." + '\n')

+ if weight_evolve is False: weights = origin_weights

  return weights, activation_potentiations

@@ -452,6 +459,7 @@ def cross_over(first_parent_W,
  first_parent_fitness,
  second_parent_fitness,
  fitness_bias,
+ weight_evolve,
  epsilon):
  """
  Performs a crossover operation on two sets of weights and activation functions.
@@ -486,6 +494,8 @@ def cross_over(first_parent_W,

  fitness_bias (float): A bias factor used to favor fitter parents during crossover operations.

+ weight_evolve (bool, optional): Are the weights to be evolved as well, or only the activation combinations?
+
  epsilon (float): Small epsilon constant

  Returns:
@@ -513,6 +523,7 @@ def cross_over(first_parent_W,
  first_parent_fitness=0.9,
  second_parent_fitness=0.85,
  fitness_bias=0.6,
+ weight_evolve=True,
  epsilon=np.finfo(float).eps
  )
  ```
@@ -546,33 +557,37 @@ def cross_over(first_parent_W,
  undominant_parent_act = first_parent_act
  succes = first_parent_fitness + epsilon

- while True:
+ if weight_evolve is True:

- row_cut_start = int(random.uniform(start, row_end))
- col_cut_start = int(random.uniform(start, col_end))
+ while True:

- row_cut_end = int(random.uniform(start, row_end))
- col_cut_end = int(random.uniform(start, col_end))
+ row_cut_start = int(random.uniform(start, row_end))
+ col_cut_start = int(random.uniform(start, col_end))

- if ((row_cut_end > row_cut_start) and
- (col_cut_end > col_cut_start) and
- (((row_cut_end + 1) - (row_cut_start + 1) * 2) + ((col_cut_end + 1) - (col_cut_start + 1) * 2) <= half_of_gene)):
- break
-
- selection_bias = random.uniform(0, 1)
+ row_cut_end = int(random.uniform(start, row_end))
+ col_cut_end = int(random.uniform(start, col_end))

- if fitness_bias > selection_bias:
- row_cut_start = math.floor(row_cut_start * succes)
- row_cut_end = math.ceil(row_cut_end * succes)
+ if ((row_cut_end > row_cut_start) and
+ (col_cut_end > col_cut_start) and
+ (((row_cut_end + 1) - (row_cut_start + 1) * 2) + ((col_cut_end + 1) - (col_cut_start + 1) * 2) <= half_of_gene)):
+ break
+
+ selection_bias = random.uniform(0, 1)

- col_cut_start = math.floor(col_cut_start * succes)
- col_cut_end = math.ceil(col_cut_end * succes)
+ if fitness_bias > selection_bias:
+ row_cut_start = math.floor(row_cut_start * succes)
+ row_cut_end = math.ceil(row_cut_end * succes)

- child_W = dominant_parent_W
+ col_cut_start = math.floor(col_cut_start * succes)
+ col_cut_end = math.ceil(col_cut_end * succes)

- if cross_over_mode == 'tpm':
- child_W[row_cut_start:row_cut_end, col_cut_start:col_cut_end] = undominant_parent_W[row_cut_start:row_cut_end, col_cut_start:col_cut_end]
+ child_W = dominant_parent_W

+ if cross_over_mode == 'tpm':
+ child_W[row_cut_start:row_cut_end, col_cut_start:col_cut_end] = undominant_parent_W[row_cut_start:row_cut_end, col_cut_start:col_cut_end]
+
+ else: child_W = dominant_parent_W
+
  if isinstance(dominant_parent_act, str): dominant_parent_act = [dominant_parent_act]
  if isinstance(undominant_parent_act, str): undominant_parent_act = [undominant_parent_act]

@@ -635,6 +650,7 @@ def mutation(weight,
  weight_mutate_threshold,
  genome_fitness,
  activation_mutate_threshold,
+ weight_evolve,
  epsilon):
  """
  Performs mutation on the given weight matrix and activation functions.
@@ -662,6 +678,8 @@ def mutation(weight,

  activation_mutate_threshold (float): Determines max how much activation mutaiton operation applying. (Function automaticly determines to min)

+ weight_evolve (bool, optional): Are the weights to be mutated as well, or only the activation combinations?
+
  epsilon (float): Small epsilon constant

  Returns:
@@ -683,92 +701,94 @@ def mutation(weight,

  if isinstance(activations, str): activations = [activations]

- weight_mutate_prob = 1 - weight_mutate_prob # if prob 0.8 (%80) then 1 - 0.8. Because 0-1 random number probably greater than 0.2
- potential_weight_mutation = random.uniform(0, 1)
+ if weight_evolve is True:

- if potential_weight_mutation > weight_mutate_prob:
+ weight_mutate_prob = 1 - weight_mutate_prob # if prob 0.8 (%80) then 1 - 0.8. Because 0-1 random number probably greater than 0.2
+ potential_weight_mutation = random.uniform(0, 1)

- start = 0
- row_end = weight.shape[0]
- col_end = weight.shape[1]
+ if potential_weight_mutation > weight_mutate_prob:

- max_threshold = row_end * col_end
+ start = 0
+ row_end = weight.shape[0]
+ col_end = weight.shape[1]

- threshold = weight_mutate_threshold * (genome_fitness + epsilon)
- new_threshold = threshold
-
- for _ in range(max_threshold):
-
- selected_row = int(random.uniform(start, row_end))
- selected_col = int(random.uniform(start, col_end))
-
- weight[selected_row, selected_col] = random.uniform(-1, 1)
- new_threshold += threshold
+ max_threshold = row_end * col_end

- if max_threshold > new_threshold:
- pass
+ threshold = weight_mutate_threshold * (genome_fitness + epsilon)
+ new_threshold = threshold

- else:
- break
+ for _ in range(max_threshold):
+
+ selected_row = int(random.uniform(start, row_end))
+ selected_col = int(random.uniform(start, col_end))

- activation_mutate_prob = 1 - activation_mutate_prob
- potential_activation_mutation = random.uniform(0, 1)
+ weight[selected_row, selected_col] = random.uniform(-1, 1)
+ new_threshold += threshold

- if potential_activation_mutation > activation_mutate_prob:
+ if max_threshold > new_threshold:
+ pass

- genome_fitness += epsilon
- threshold = abs(activation_mutate_threshold / genome_fitness)
- max_threshold = len(activations)
+ else:
+ break

- new_threshold = threshold
+ activation_mutate_prob = 1 - activation_mutate_prob
+ potential_activation_mutation = random.uniform(0, 1)

- except_this = ['spiral', 'circular']
- all_acts = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
+ if potential_activation_mutation > activation_mutate_prob:

- activation_add_prob = 1 - activation_add_prob
- activation_delete_prob = 1 - activation_delete_prob
- activation_change_prob = 1 - activation_change_prob
+ genome_fitness += epsilon
+ threshold = abs(activation_mutate_threshold / genome_fitness)
+ max_threshold = len(activations)

- for _ in range(max_threshold):
-
- potential_activation_add_prob = random.uniform(0, 1)
- potential_activation_delete_prob = random.uniform(0, 1)
- potential_activation_change_prob = random.uniform(0, 1)
+ new_threshold = threshold

+ except_this = ['spiral', 'circular']
+ all_acts = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
+
+ activation_add_prob = 1 - activation_add_prob
+ activation_delete_prob = 1 - activation_delete_prob
+ activation_change_prob = 1 - activation_change_prob

- if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:
-
- random_index = random.randint(0, len(activations) - 1)
- activations.pop(random_index)
+ for _ in range(max_threshold):

+ potential_activation_add_prob = random.uniform(0, 1)
+ potential_activation_delete_prob = random.uniform(0, 1)
+ potential_activation_change_prob = random.uniform(0, 1)

- if potential_activation_add_prob > activation_add_prob:

- try:
-
- random_index_all_act = int(random.uniform(0, len(all_acts)-1))
- activations.append(all_acts[random_index_all_act])
+ if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:
+
+ random_index = random.randint(0, len(activations) - 1)
+ activations.pop(random_index)

- except:

- activation = activations
- activations = []
+ if potential_activation_add_prob > activation_add_prob:

- activations.append(activation)
- activations.append(all_acts[int(random.uniform(0, len(all_acts)-1))])
-
-
- if potential_activation_change_prob > activation_change_prob:
+ try:

  random_index_all_act = int(random.uniform(0, len(all_acts)-1))
- random_index_genom_act = int(random.uniform(0, len(activations)-1))
+ activations.append(all_acts[random_index_all_act])

- activations[random_index_genom_act] = all_acts[random_index_all_act]
+ except:

- new_threshold += threshold
+ activation = activations
+ activations = []
+
+ activations.append(activation)
+ activations.append(all_acts[int(random.uniform(0, len(all_acts)-1))])
+
+
+ if potential_activation_change_prob > activation_change_prob:
+
+ random_index_all_act = int(random.uniform(0, len(all_acts)-1))
+ random_index_genom_act = int(random.uniform(0, len(activations)-1))
+
+ activations[random_index_genom_act] = all_acts[random_index_all_act]
+
+ new_threshold += threshold

- if max_threshold > new_threshold: pass
- else: break
+ if max_threshold > new_threshold: pass
+ else: break

  return weight, activations

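Called directly, the evolver behaves as the new docstring describes: a weight population must still be supplied even when weight_evolve=False, in which case it is copied to origin_weights up front and restored before returning. A minimal sketch follows; the population size, shapes and fitness values are made up, and define_genomes is assumed to return a weight population plus matching activation lists.

    import numpy as np
    from pyerualjetwork import planeat

    # population size must be even, otherwise evolver raises ValueError
    weights, activations = planeat.define_genomes(input_shape=4, output_shape=3, population_size=10)
    fitness = np.random.rand(10).astype(np.float32)  # placeholder fitness scores

    weights, activations = planeat.evolver(np.array(weights, dtype=np.float32),
                                           activations,
                                           1,                    # what_gen
                                           fitness,
                                           weight_evolve=False,  # weights come back exactly as given
                                           show_info=False)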
pyerualjetwork/planeat_cuda.py CHANGED
@@ -75,7 +75,8 @@ def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32)
  def evolver(weights,
  activation_potentiations,
  what_gen,
- fitness,
+ fitness,
+ weight_evolve=True,
  show_info=False,
  policy='aggressive',
  bad_genomes_selection_prob=None,
@@ -114,6 +115,8 @@ def evolver(weights,
  fitness (cupy.ndarray): A 1D array containing the fitness values of each genome.
  The array is used to rank the genomes based on their performance. PLANEAT maximizes or minimizes this fitness based on the `target_fitness` parameter.

+ weight_evolve (bool, optional): Should the weights evolve too, or only the activation combinations? Default: True. Note: Regardless of whether this parameter is True or False, you must give the evolver function a list of weights equal to the number of activation potentiations. You can create completely random weights if you want. If this parameter is False, the weights entering the evolver function and the weights it returns will be exactly the same.
+
  show_info (bool, optional): If True, prints information about the current generation and the
  maximum reward obtained. Also shows the current configuration. Default is False.

@@ -262,6 +265,8 @@ def evolver(weights,
  else:
  raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")

+ if weight_evolve is False: origin_weights = cp.copy(weights)
+
  ### FITNESS LIST IS SORTED IN ASCENDING ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:

  sort_indices = cp.argsort(fitness)
@@ -325,6 +330,7 @@ def evolver(weights,
  first_parent_fitness=best_fitness,
  fitness_bias=fitness_bias,
  second_parent_fitness=normalized_fitness[s_i],
+ weight_evolve=weight_evolve,
  epsilon=epsilon
  )

@@ -352,6 +358,7 @@ def evolver(weights,
  weight_mutate_threshold=weight_mutate_threshold,
  genome_fitness=normalized_fitness[fitness_index],
  activation_mutate_threshold=activation_mutate_threshold,
+ weight_evolve=weight_evolve,
  epsilon=epsilon
  )

@@ -396,6 +403,7 @@ def evolver(weights,
  print(" BEST GENOME INDEX: ", str(0))
  print(" NOTE: The returned genome at the first index is the best of the previous generation." + '\n')

+ if weight_evolve is False: weights = origin_weights

  return weights, activation_potentiations

@@ -452,6 +460,7 @@ def cross_over(first_parent_W,
  first_parent_fitness,
  second_parent_fitness,
  fitness_bias,
+ weight_evolve,
  epsilon):
  """
  Performs a crossover operation on two sets of weights and activation functions.
@@ -486,6 +495,8 @@ def cross_over(first_parent_W,

  fitness_bias (float): A bias factor used to favor fitter parents during crossover operations.

+ weight_evolve (bool, optional): Are the weights to be evolved as well, or only the activation combinations?
+
  epsilon (float): Small epsilon constant

  Returns:
@@ -513,6 +524,7 @@ def cross_over(first_parent_W,
  first_parent_fitness=0.9,
  second_parent_fitness=0.85,
  fitness_bias=0.6,
+ weight_evolve=True,
  epsilon=cp.finfo(float).eps
  )
  ```
@@ -546,33 +558,35 @@ def cross_over(first_parent_W,
  undominant_parent_act = first_parent_act
  succes = first_parent_fitness + epsilon

- while True:
-
- row_cut_start = int(random.uniform(start, row_end))
- col_cut_start = int(random.uniform(start, col_end))
+ if weight_evolve is True:
+ while True:

- row_cut_end = int(random.uniform(start, row_end))
- col_cut_end = int(random.uniform(start, col_end))
+ row_cut_start = int(random.uniform(start, row_end))
+ col_cut_start = int(random.uniform(start, col_end))

- if ((row_cut_end > row_cut_start) and
- (col_cut_end > col_cut_start) and
- (((row_cut_end + 1) - (row_cut_start + 1) * 2) + ((col_cut_end + 1) - (col_cut_start + 1) * 2) <= half_of_gene)):
- break
-
- selection_bias = random.uniform(0, 1)
+ row_cut_end = int(random.uniform(start, row_end))
+ col_cut_end = int(random.uniform(start, col_end))

- if fitness_bias > selection_bias:
- row_cut_start = math.floor(row_cut_start * succes)
- row_cut_end = math.ceil(row_cut_end * succes)
+ if ((row_cut_end > row_cut_start) and
+ (col_cut_end > col_cut_start) and
+ (((row_cut_end + 1) - (row_cut_start + 1) * 2) + ((col_cut_end + 1) - (col_cut_start + 1) * 2) <= half_of_gene)):
+ break
+
+ selection_bias = random.uniform(0, 1)

- col_cut_start = math.floor(col_cut_start * succes)
- col_cut_end = math.ceil(col_cut_end * succes)
+ if fitness_bias > selection_bias:
+ row_cut_start = math.floor(row_cut_start * succes)
+ row_cut_end = math.ceil(row_cut_end * succes)

- child_W = dominant_parent_W
+ col_cut_start = math.floor(col_cut_start * succes)
+ col_cut_end = math.ceil(col_cut_end * succes)

- if cross_over_mode == 'tpm':
- child_W[row_cut_start:row_cut_end, col_cut_start:col_cut_end] = undominant_parent_W[row_cut_start:row_cut_end, col_cut_start:col_cut_end]
+ child_W = dominant_parent_W

+ if cross_over_mode == 'tpm':
+ child_W[row_cut_start:row_cut_end, col_cut_start:col_cut_end] = undominant_parent_W[row_cut_start:row_cut_end, col_cut_start:col_cut_end]
+
+ else: child_W = dominant_parent_W

  if isinstance(dominant_parent_act, str): dominant_parent_act = [dominant_parent_act]
  if isinstance(undominant_parent_act, str): undominant_parent_act = [undominant_parent_act]
@@ -637,6 +651,7 @@ def mutation(weight,
  weight_mutate_threshold,
  genome_fitness,
  activation_mutate_threshold,
+ weight_evolve,
  epsilon):
  """
  Performs mutation on the given weight matrix and activation functions.
@@ -664,6 +679,8 @@ def mutation(weight,

  activation_mutate_threshold (float): Determines max how much activation mutaiton operation applying. (Function automaticly determines to min)

+ weight_evolve (bool, optional): Are the weights to be mutated as well, or only the activation combinations?
+
  epsilon (float): Small epsilon constant

  Returns:
@@ -685,91 +702,93 @@ def mutation(weight,

  if isinstance(activations, str): activations = [activations]

- weight_mutate_prob = 1 - weight_mutate_prob # if prob 0.8 (%80) then 1 - 0.8. Because 0-1 random number probably greater than 0.2
- potential_weight_mutation = random.uniform(0, 1)
+ if weight_evolve is True:

- if potential_weight_mutation > weight_mutate_prob:
+ weight_mutate_prob = 1 - weight_mutate_prob # if prob 0.8 (%80) then 1 - 0.8. Because 0-1 random number probably greater than 0.2
+ potential_weight_mutation = random.uniform(0, 1)

- start = 0
- row_end = weight.shape[0]
- col_end = weight.shape[1]
-
- max_threshold = row_end * col_end
-
- threshold = weight_mutate_threshold * (genome_fitness + epsilon)
- new_threshold = threshold
+ if potential_weight_mutation > weight_mutate_prob:

- for _ in range(max_threshold):
-
- selected_row = int(random.uniform(start, row_end))
- selected_col = int(random.uniform(start, col_end))
-
- weight[selected_row, selected_col] = random.uniform(-1, 1)
- new_threshold += threshold
-
- if max_threshold > new_threshold:
- pass
+ start = 0
+ row_end = weight.shape[0]
+ col_end = weight.shape[1]

- else:
- break
-
- activation_mutate_prob = 1 - activation_mutate_prob
- potential_activation_mutation = random.uniform(0, 1)
-
- if potential_activation_mutation > activation_mutate_prob:
-
- genome_fitness += epsilon
- threshold = abs(activation_mutate_threshold / genome_fitness)
- max_threshold = len(activations)
+ max_threshold = row_end * col_end

+ threshold = weight_mutate_threshold * (genome_fitness + epsilon)
  new_threshold = threshold
-
- except_this = ['spiral', 'circular']
- all_acts = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
-
- activation_add_prob = 1 - activation_add_prob
- activation_delete_prob = 1 - activation_delete_prob
- activation_change_prob = 1 - activation_change_prob

  for _ in range(max_threshold):
+
+ selected_row = int(random.uniform(start, row_end))
+ selected_col = int(random.uniform(start, col_end))

- potential_activation_add_prob = random.uniform(0, 1)
- potential_activation_delete_prob = random.uniform(0, 1)
- potential_activation_change_prob = random.uniform(0, 1)
+ weight[selected_row, selected_col] = random.uniform(-1, 1)
+ new_threshold += threshold

+ if max_threshold > new_threshold:
+ pass

- if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:
-
- random_index = random.randint(0, len(activations) - 1)
- activations.pop(random_index)
-
+ else:
+ break

- if potential_activation_add_prob > activation_add_prob:
+ activation_mutate_prob = 1 - activation_mutate_prob
+ potential_activation_mutation = random.uniform(0, 1)

- try:
-
- random_index_all_act = int(random.uniform(0, len(all_acts)-1))
- activations.append(all_acts[random_index_all_act])
+ if potential_activation_mutation > activation_mutate_prob:
+
+ genome_fitness += epsilon
+ threshold = abs(activation_mutate_threshold / genome_fitness)
+ max_threshold = len(activations)

- except:
+ new_threshold = threshold
+
+ except_this = ['spiral', 'circular']
+ all_acts = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
+
+ activation_add_prob = 1 - activation_add_prob
+ activation_delete_prob = 1 - activation_delete_prob
+ activation_change_prob = 1 - activation_change_prob

- activation = activations
- activations = []
+ for _ in range(max_threshold):

- activations.append(activation)
- activations.append(all_acts[int(random.uniform(0, len(all_acts)-1))])
-
- if potential_activation_change_prob > activation_change_prob:
+ potential_activation_add_prob = random.uniform(0, 1)
+ potential_activation_delete_prob = random.uniform(0, 1)
+ potential_activation_change_prob = random.uniform(0, 1)
+
+
+ if potential_activation_delete_prob > activation_delete_prob and len(activations) > 1:
+
+ random_index = random.randint(0, len(activations) - 1)
+ activations.pop(random_index)
+
+
+ if potential_activation_add_prob > activation_add_prob:
+
+ try:

  random_index_all_act = int(random.uniform(0, len(all_acts)-1))
- random_index_genom_act = int(random.uniform(0, len(activations)-1))
+ activations.append(all_acts[random_index_all_act])

- activations[random_index_genom_act] = all_acts[random_index_all_act]
+ except:

- new_threshold += threshold
+ activation = activations
+ activations = []
+
+ activations.append(activation)
+ activations.append(all_acts[int(random.uniform(0, len(all_acts)-1))])
+
+ if potential_activation_change_prob > activation_change_prob:
+
+ random_index_all_act = int(random.uniform(0, len(all_acts)-1))
+ random_index_genom_act = int(random.uniform(0, len(activations)-1))
+
+ activations[random_index_genom_act] = all_acts[random_index_all_act]
+
+ new_threshold += threshold

- if max_threshold > new_threshold: pass
- else: break
+ if max_threshold > new_threshold: pass
+ else: break

  return weight, activations

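The planeat_cuda changes mirror the NumPy module line for line (cp.copy instead of np.copy, CuPy fitness arrays). A brief counterpart sketch; the random weights follow the note in the new docstring that completely random weights are acceptable when weight_evolve=False, while the shapes and the 'tanh' activation are illustrative assumptions.

    import cupy as cp
    from pyerualjetwork import planeat_cuda

    population = 10  # must be even
    weights = cp.random.uniform(-1, 1, (population, 3, 4)).astype(cp.float32)  # random genomes are fine here
    activations = [['tanh'] for _ in range(population)]
    fitness = cp.random.rand(population).astype(cp.float32)  # placeholder fitness scores

    weights, activations = planeat_cuda.evolver(weights,
                                                activations,
                                                1,                    # what_gen
                                                fitness,
                                                weight_evolve=False,  # origin_weights is restored before returning
                                                show_info=False)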
pyerualjetwork/visualizations.py CHANGED
@@ -300,9 +300,9 @@ def draw_activations(x_train, activation):

  elif activation == 'cubic_quadratic':
  result = af.cubic_quadratic(x_train)
-
- elif activation == 'exp_cubic':
- result = af.exp_cubic(x_train)
+
+ #elif activation == 'exp_cubic':
+ #result = af.exp_cubic(x_train)

  elif activation == 'sine_square':
  result = af.sine_square(x_train)
pyerualjetwork-4.3.13.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.3.12
+ Version: 4.3.13
  Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
pyerualjetwork-4.3.13.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- pyerualjetwork/__init__.py,sha256=fJItQArSdhwY3VlHiV7LjyWy21XGRdOUPNaHZIyWFow,1280
+ pyerualjetwork/__init__.py,sha256=8QxVhLrF9q38cHUgWG-XtQMIkD9w6SgPTgnyjl5PiG8,1280
  pyerualjetwork/activation_functions.py,sha256=Ms0AGBqkJuCA42ht64MSQnO54Td_1eDGquedpoBDVbc,7642
  pyerualjetwork/activation_functions_cuda.py,sha256=5y1Ti3GDfDteQDCUmODwe7tAyDAUlDTKmIikChQ8d6g,7772
  pyerualjetwork/data_operations.py,sha256=Flteouu6rfSo2uHMqBHuzO02dXmbNa-I5qWmUpGTZ5Y,14760
@@ -12,14 +12,14 @@ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,607
  pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
  pyerualjetwork/model_operations.py,sha256=MCSCNYiiICRVZITobtS3ZIWmH5Q9gjyELuH32sAdgg4,12649
  pyerualjetwork/model_operations_cuda.py,sha256=NT01BK5nrDYE7H1x3KnSI8gmx0QTGGB0mP_LqEb1uuU,13157
- pyerualjetwork/plan.py,sha256=36ZkwcE8HVkx_WIFbfGe4xAjUPwL0KrJQmI-3_NuryQ,22391
- pyerualjetwork/plan_cuda.py,sha256=qVhAG2ZS1zudygegILPQD5gEWWCm8-JhMvNtFtoxuds,23394
- pyerualjetwork/planeat.py,sha256=4BVC7km2O93hB5qK3q6-GB4sO-2WHwio9E780_e49Cs,37721
- pyerualjetwork/planeat_cuda.py,sha256=8ftHNWD3DXKHXF9JHjmoRhM7ylcVLWmYCMSyIcVpSjg,37776
+ pyerualjetwork/plan.py,sha256=X2mzc9s36xCzUy0SzKeimbeVVlvajhOpXDaME8x83yQ,22974
+ pyerualjetwork/plan_cuda.py,sha256=l-2xK2vu-SM64EEOOA8GXsfbXTUNcSL6Sc3ora5Ze3k,23966
+ pyerualjetwork/planeat.py,sha256=oAu0a2rs6BQ2HUpLjKFR4m7xDbhUVOzLstop75u35Tg,38889
+ pyerualjetwork/planeat_cuda.py,sha256=E_pJEsNKfiF97FvGZGSsxmdqzLAJub05enmneymozcg,38922
  pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
- pyerualjetwork/visualizations.py,sha256=t1BqnFUH5jKiPdFMI2kWjFg6-amrBV0wvW05aD77NQs,28288
+ pyerualjetwork/visualizations.py,sha256=1QSisYAaGnO5kug6qo1qOssTkQM-MgR7L8h4c3sczzU,28294
  pyerualjetwork/visualizations_cuda.py,sha256=PYRqj4QYUbuYMYcNwO8yaTPB-jK7E6kZHhTrAi0lwPU,28749
- pyerualjetwork-4.3.12.dist-info/METADATA,sha256=9SFZMDzualijFQdFP8B5qs_DKIi0xX8Vfj8lWCJxpPk,7506
- pyerualjetwork-4.3.12.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-4.3.12.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
- pyerualjetwork-4.3.12.dist-info/RECORD,,
+ pyerualjetwork-4.3.13.dist-info/METADATA,sha256=yGTaFrahQ_b369UOVc11vEtUGG3My1ykykLcJ7e3Jmg,7506
+ pyerualjetwork-4.3.13.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-4.3.13.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-4.3.13.dist-info/RECORD,,