pyerualjetwork 3.3.0__py3-none-any.whl → 3.3.2__py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
plan/plan.py CHANGED
@@ -20,6 +20,7 @@ from scipy import io
  import scipy.io as sio
  from matplotlib.animation import ArtistAnimation
  import networkx as nx
+ import sys

  # BUILD -----

@@ -66,7 +67,7 @@ def fit(
  if len(x_train) != len(y_train):

  print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN + Style.RESET_ALL)
- return 'e'
+ sys.exit()

  if val == True:

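Note: the recurring pattern in this release is that error paths which returned the sentinel string 'e' in 3.3.0 now call sys.exit() (hence the new `import sys` above), so failures abort the process instead of returning a value the caller had to check. A minimal sketch of the new failure mode, assuming plan is importable; the mismatched-length inputs are illustrative, not from the package's own tests:

```python
import plan

try:
    # Lengths differ on purpose, which triggers ERROR301 in fit.
    plan.fit(x_train=[[0, 1]], y_train=[[1, 0], [0, 1]])
except SystemExit:
    # 3.3.0 returned the string 'e' here; 3.3.2 raises SystemExit via sys.exit().
    print("fit aborted instead of returning 'e'")
```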
@@ -105,7 +106,7 @@ def fit(

  print(Fore.RED + "ERROR115: For showing training, val parameter must be True. from: fit",
  infoPLAN + Style.RESET_ALL)
- return 'e'
+ sys.exit()


  class_count = set()
@@ -170,7 +171,7 @@ def fit(
  if x_train_size != len(inp):
  print(Fore.RED + "ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",
  infoPLAN + Style.RESET_ALL)
- return 'e'
+ sys.exit()

  neural_layer = inp

@@ -341,7 +342,7 @@ def generate_fixed_positions(G, layout_type='circular'):
  pos[node] = (i % grid_size, i // grid_size)
  else:
  raise ValueError("Unsupported layout_type. Use 'circular' or 'grid'.")
-
+ 
  return pos

  def weight_normalization(
@@ -366,11 +367,8 @@ def weight_normalization(
  return W

  def weight_identification(
- layer_count, # int: Number of layers in the neural network.
- class_count, # int: Number of classes in the classification task.
  fex_neurons,
  cat_neurons, # list[num]: List of neuron counts for each layer.
- x_train_size # int: Size of the input data.
  ) -> str:
  """
  Identifies the weights for a neural network model.
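Since weight_identification loses three parameters, positional call sites must shrink accordingly. A hypothetical call-site update (the old argument names are taken from the removed lines above; the call itself is illustrative, not from the package):

```python
# 3.3.0:
# W = weight_identification(layer_count, class_count, fex_neurons, cat_neurons, x_train_size)
# 3.3.2:
W = weight_identification(fex_neurons, cat_neurons)
```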
@@ -396,6 +394,20 @@ def weight_identification(

  # ACTIVATION FUNCTIONS -----

+ def spiral_activation(x):
+
+ r = np.sqrt(np.sum(x**2))
+
+ theta = np.arctan2(x[1:], x[:-1])
+
+ spiral_x = r * np.cos(theta + r)
+ spiral_y = r * np.sin(theta + r)
+
+
+ spiral_output = np.concatenate(([spiral_x[0]], spiral_y))
+
+ return spiral_output
+
  def Softmax(
  x # num: Input data to be transformed using softmax function.
  ):
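The new spiral_activation maps a vector to a same-length vector: r is the magnitude of the whole input, arctan2 over consecutive component pairs gives one angle per adjacent pair, and both coordinates are rotated by r. A standalone sketch (the function body is copied from the hunk above; the example call is ours):

```python
import numpy as np

def spiral_activation(x):
    r = np.sqrt(np.sum(x**2))          # magnitude of the whole input vector
    theta = np.arctan2(x[1:], x[:-1])  # one angle per pair of consecutive components
    spiral_x = r * np.cos(theta + r)   # rotate each angle by the magnitude
    spiral_y = r * np.sin(theta + r)
    return np.concatenate(([spiral_x[0]], spiral_y))

x = np.array([1.0, 2.0, 3.0])
print(spiral_activation(x).shape)  # (3,): output length matches input length
```

Note the function assumes a 1-D input with at least two elements; a scalar or length-1 vector leaves spiral_x empty and spiral_x[0] raises an IndexError.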
@@ -562,15 +574,58 @@ def sine_square(x):
  def logarithmic(x):
  return np.log(x**2 + 1)

- def power(x, p):
- return x**p
-
  def scaled_cubic(x, alpha=1.0):
  return alpha * x**3

  def sine_offset(x, beta=0.0):
  return np.sin(x + beta)

+ def activations_list():
+ """
+ spiral,
+ sigmoid,
+ relu,
+ tanh,
+ swish,
+ circular,
+ mod_circular,
+ tanh_circular,
+ leaky_relu,
+ softplus,
+ elu,
+ gelu,
+ selu,
+ sinakt,
+ p_squared,
+ sglu,
+ dlrelu,
+ exsig,
+ acos,
+ gla,
+ srelu,
+ qelu,
+ isra,
+ waveakt,
+ arctan,
+ bent_identity,
+ sech,
+ softsign,
+ pwl,
+ cubic,
+ gaussian,
+ sine,
+ tanh_square,
+ mod_sigmoid,
+ quartic,
+ square_quartic,
+ cubic_quadratic,
+ exp_cubic,
+ sine_square,
+ logarithmic,
+ scaled_cubic,
+ sine_offset
+ """
+

  def fex(
  Input, # list[num]: Input data.
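activations_list carries its information entirely in the docstring (its body is just the string, so calling it returns None); it exists to be read via help(), which is exactly what the new ERROR120 message in the fex hunk below points to:

```python
import plan

help(plan.activations_list)  # prints the supported activation names
```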
@@ -605,132 +660,140 @@ def fex(
  if activation == 'sigmoid':
  Output += Sigmoid(Input)

- if activation == 'swish':
+ elif activation == 'swish':
  Output += swish(Input)

- if activation == 'circular':
+ elif activation == 'circular':
  Output += circular_activation(Input)

- if activation == 'mod_circular':
+ elif activation == 'mod_circular':
  Output += modular_circular_activation(Input)

- if activation == 'tanh_circular':
+ elif activation == 'tanh_circular':
  Output += tanh_circular_activation(Input)

- if activation == 'leaky_relu':
+ elif activation == 'leaky_relu':
  Output += leaky_relu(Input)

- if activation == 'relu':
+ elif activation == 'relu':
  Output += Relu(Input)

- if activation == 'softplus':
+ elif activation == 'softplus':
  Output += softplus(Input)

- if activation == 'elu':
+ elif activation == 'elu':
  Output += elu(Input)

- if activation == 'gelu':
+ elif activation == 'gelu':
  Output += gelu(Input)

- if activation == 'selu':
+ elif activation == 'selu':
  Output += selu(Input)

- if activation == 'softmax':
+ elif activation == 'softmax':
  Output += Softmax(Input)

- if activation == 'tanh':
+ elif activation == 'tanh':
  Output += tanh(Input)

- if activation == 'sinakt':
+ elif activation == 'sinakt':
  Output += sinakt(Input)

- if activation == 'p_squared':
+ elif activation == 'p_squared':
  Output += p_squared(Input)

- if activation == 'sglu':
+ elif activation == 'sglu':
  Output += sglu(Input, alpha=1.0)

- if activation == 'dlrelu':
+ elif activation == 'dlrelu':
  Output += dlrelu(Input)

- if activation == 'exsig':
+ elif activation == 'exsig':
  Output += exsig(Input)

- if activation == 'acos':
+ elif activation == 'acos':
  Output += acos(Input, alpha=1.0, beta=0.0)

- if activation == 'gla':
+ elif activation == 'gla':
  Output += gla(Input, alpha=1.0, mu=0.0)

- if activation == 'srelu':
+ elif activation == 'srelu':
  Output += srelu(Input)

- if activation == 'qelu':
+ elif activation == 'qelu':
  Output += qelu(Input)

- if activation == 'isra':
+ elif activation == 'isra':
  Output += isra(Input)

- if activation == 'waveakt':
+ elif activation == 'waveakt':
  Output += waveakt(Input)

- if activation == 'arctan':
+ elif activation == 'arctan':
  Output += arctan(Input)

- if activation == 'bent_identity':
+ elif activation == 'bent_identity':
  Output += bent_identity(Input)

- if activation == 'sech':
+ elif activation == 'sech':
  Output += sech(Input)

- if activation == 'softsign':
+ elif activation == 'softsign':
  Output += softsign(Input)

- if activation == 'pwl':
+ elif activation == 'pwl':
  Output += pwl(Input)

- if activation == 'cubic':
+ elif activation == 'cubic':
  Output += cubic(Input)

- if activation == 'gaussian':
+ elif activation == 'gaussian':
  Output += gaussian(Input)

- if activation == 'sine':
+ elif activation == 'sine':
  Output += sine(Input)

- if activation == 'tanh_square':
+ elif activation == 'tanh_square':
  Output += tanh_square(Input)

- if activation == 'mod_sigmoid':
+ elif activation == 'mod_sigmoid':
  Output += mod_sigmoid(Input)

- if activation == None or activation == 'linear':
+ elif activation == None or activation == 'linear':
  Output += Input

- if activation == 'quartic':
+ elif activation == 'quartic':
  Output += quartic(Input)

- if activation == 'square_quartic':
+ elif activation == 'square_quartic':
  Output += square_quartic(Input)

- if activation == 'cubic_quadratic':
+ elif activation == 'cubic_quadratic':
  Output += cubic_quadratic(Input)

- if activation == 'exp_cubic':
+ elif activation == 'exp_cubic':
  Output += exp_cubic(Input)

- if activation == 'sine_square':
+ elif activation == 'sine_square':
  Output += sine_square(Input)

- if activation == 'logarithmic':
+ elif activation == 'logarithmic':
  Output += logarithmic(Input)

- if activation == 'scaled_cubic':
+ elif activation == 'scaled_cubic':
  Output += scaled_cubic(Input, 1.0)

- if activation == 'sine_offset':
+ elif activation == 'sine_offset':
  Output += sine_offset(Input, 1.0)

+ elif activation == 'spiral':
+ Output += spiral_activation(Input)
+
+ else:
+
+ print(Fore.RED + 'ERROR120: "' + activation + '" is not available. Please run this code to see the available activation function list: help(plan.activations_list)' + Style.RESET_ALL)
+ sys.exit()
+

  Input = Output

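Two behavior changes land in this hunk. First, the flat chain of if tests becomes if/elif, so at most one activation branch runs per call (previously every condition was re-evaluated on each pass). Second, an unrecognized activation name is now a hard error via the new else branch rather than a silent no-op. Illustrative comparison, not package code:

```python
# 3.3.0: a typo like activation='reluu' matched no `if`; Output was
#        left unchanged and the mistake passed silently.
# 3.3.2: the trailing `else` prints ERROR120 and calls sys.exit().
```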
@@ -899,6 +962,7 @@ def evaluate(
  except:

  print(Fore.RED + 'ERROR:' + infoTestModel + Style.RESET_ALL)
+ sys.exit()

  return W, y_preds, acc

@@ -1024,14 +1088,14 @@ def multiple_evaluate(
  except:

  print(Fore.RED + "ERROR: Testing model parameters like 'activation_potentiation' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
- return 'e'
+
+ sys.exit()

  return W, y_preds, acc


  def save_model(model_name,
  model_type,
- class_count,
  test_acc,
  weights_type,
  weights_format,
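save_model likewise drops class_count, so positional callers need one argument fewer. A hypothetical call-site update (argument lists shortened for illustration):

```python
# 3.3.0:
# save_model(model_name, model_type, class_count, test_acc, weights_type, weights_format, ...)
# 3.3.2:
# save_model(model_name, model_type, test_acc, weights_type, weights_format, ...)
```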
@@ -1068,12 +1132,12 @@ def save_model(model_name,
  if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
  print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" +
  infosave_model + Style.RESET_ALL)
- return 'e'
+ sys.exit()

  if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
  print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" +
  infosave_model + Style.RESET_ALL)
- return 'e'
+ sys.exit()

  NeuronCount = 0
  SynapseCount = 0
@@ -1087,7 +1151,7 @@ def save_model(model_name,

  print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" +
  infosave_model + Style.RESET_ALL)
- return 'e'
+ sys.exit()

  if scaler_params != None:

@@ -1125,7 +1189,7 @@ def save_model(model_name,

  print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" +
  infosave_model + Style.RESET_ALL)
- return 'e'
+ sys.exit()

  try:

@@ -1184,7 +1248,7 @@ def save_model(model_name,
  except:

  print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
- return 'e'
+ sys.exit()
  print(df)
  message = (
  Fore.GREEN + "Model Saved Successfully\n" +
@@ -1291,7 +1355,7 @@ def predict_model_ssd(Input, model_name, model_path):
  except:
  print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
  infopredict_model_ssd + Style.RESET_ALL)
- return 'e'
+ sys.exit()
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)
  return neural_layer
@@ -1346,7 +1410,7 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=[Non
  except:
  print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
  infopredict_model_ram + Style.RESET_ALL)
- return 'e'
+ sys.exit()

  def auto_balancer(x_train, y_train):

@@ -1393,7 +1457,7 @@ def auto_balancer(x_train, y_train):
  ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
  except:
  print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
- return 'e'
+ sys.exit()

  return np.array(BalancedInputs), np.array(BalancedLabels)

@@ -1448,7 +1512,7 @@ def synthetic_augmentation(x_train, y_train):
  return np.array(x_balanced), np.array(y_balanced)


- def standard_scaler(x_train, x_test=None, scaler_params=None):
+ def standard_scaler(x_train=None, x_test=None, scaler_params=None):
  info_standard_scaler = """
  Standardizes training and test datasets. x_test may be None.

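With x_train now defaulting to None, standard_scaler can presumably be called without training data, e.g. to reapply saved scaler_params to new inputs. This reading is an assumption based only on the new default visible in this hunk (the function's return value is not shown in the diff):

```python
# 3.3.0: standard_scaler(x_train, x_test)                      # x_train was required
# 3.3.2: standard_scaler(x_test=x_new, scaler_params=params)   # no x_train needed
```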
@@ -1524,7 +1588,7 @@ def standard_scaler(x_train, x_test=None, scaler_params=None):
  print(
  Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler + Style.RESET_ALL)

- return('e')
+ sys.exit()

  def encode_one_hot(y_train, y_test):
  """
@@ -1883,7 +1947,7 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation)

  Z = [None] * len(grid_full)

- predict_progress = tqdm(total=len(grid_full),leave=False, desc="Predicts For Desicion Boundary",ncols= 120)
+ predict_progress = tqdm(total=len(grid_full),leave=False, desc="Predicts For Decision Boundary",ncols= 120)

  for i in range(len(grid_full)):

@@ -2036,8 +2100,8 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
  x_train = np.array(x_train)
  y_train = np.array(y_train)
  except:
- print(Fore.GREEN + "x_tarin and y_train already numpyarray." + Style.RESET_ALL)
  pass
+
  classes = np.arange(y_train.shape[1])
  class_count = len(classes)

pyerualjetwork-3.3.0.dist-info/METADATA → pyerualjetwork-3.3.2.dist-info/METADATA RENAMED
@@ -1,7 +1,7 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 3.3.0
- Summary: fit function changes: LTD parameter included for Deep PLAN and professional visualizing for training. in the fit funciton, show_training=True
+ Version: 3.3.2
+ Summary: Code improvements
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
  Keywords: model evaluation,classifcation,potentiation learning artficial neural networks
pyerualjetwork-3.3.2.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+ plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
+ plan/plan.py,sha256=ZNyDnEO12v9AGYxJe691kT9NICb-NmC5AfGqnZStzRQ,68651
+ pyerualjetwork-3.3.2.dist-info/METADATA,sha256=GrHAKJ14Qf8vKcDtPoJL3lvD0q_c-kVFUdpt9gxakGQ,244
+ pyerualjetwork-3.3.2.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-3.3.2.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
+ pyerualjetwork-3.3.2.dist-info/RECORD,,
@@ -1,6 +0,0 @@
1
- plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
2
- plan/plan.py,sha256=2ccjNjnPWMlC1uatIdXpexBpTgdYlBQdd6Hua9cDrb0,67575
3
- pyerualjetwork-3.3.0.dist-info/METADATA,sha256=oaNPz8e5fpUVSBNugzKQskDpDYW41Fotoy_WVl-H5ao,368
4
- pyerualjetwork-3.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
5
- pyerualjetwork-3.3.0.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
6
- pyerualjetwork-3.3.0.dist-info/RECORD,,