pyerualjetwork 3.3.1__py3-none-any.whl → 3.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plan/plan.py CHANGED
@@ -20,6 +20,7 @@ from scipy import io
 import scipy.io as sio
 from matplotlib.animation import ArtistAnimation
 import networkx as nx
+import sys
 
 # BUILD -----
 
@@ -41,7 +42,7 @@ def fit(
 infoPLAN = """
 Creates and configures a PLAN model.
 
-Args:
+fit Args:
 x_train (list[num]): List or numarray of input data.
 y_train (list[num]): List or numarray of target labels. (one hot encoded)
 val (None or True): validation in training process ? None or True default: None (optional)
@@ -66,7 +67,7 @@ def fit(
 if len(x_train) != len(y_train):
 
 print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN + Style.RESET_ALL)
-return 'e'
+sys.exit()
 
 if val == True:
 
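The dominant change in this release is mechanical: every error path in plan.py that used to return the sentinel string 'e' (here, and below in fit, evaluate, multiple_evaluate, save_model, predict_model_ssd, predict_model_ram, auto_balancer, and standard_scaler) now calls sys.exit() instead. A minimal sketch of the behavioral difference, with hypothetical function names:

    import sys

    def validate_old(x_train, y_train):
        if len(x_train) != len(y_train):
            return 'e'  # sentinel string; every caller had to remember to check for it

    def validate_new(x_train, y_train):
        if len(x_train) != len(y_train):
            sys.exit()  # raises SystemExit and stops the process unless it is caught

The trade-off: callers can no longer recover in-process, but an unchecked 'e' can no longer propagate as if it were a valid result.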
@@ -105,7 +106,7 @@ def fit(
 
 print(Fore.RED + "ERROR115: For showing training, val parameter must be True. from: fit",
 infoPLAN + Style.RESET_ALL)
-return 'e'
+sys.exit()
 
 
 class_count = set()
@@ -124,10 +125,12 @@ def fit(
 else:
 
 layers = ['fex'] * visible_layer
+
+x_train_0 = np.array(x_train[0])
+
+x_train__0_vec = x_train_0.ravel()
 
-x_train[0] = np.array(x_train[0])
-x_train[0] = x_train[0].ravel()
-x_train_size = len(x_train[0])
+x_train_size = len(x_train__0_vec)
 
 if visible_layer == None:
 
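The rewritten block above also removes a side effect: in 3.3.1, x_train[0] was overwritten with its raveled copy, silently mutating the caller's training list. 3.3.3 computes the input size from local copies instead. A small sketch of the new pattern, assuming NumPy-convertible samples:

    import numpy as np

    x_train = [[[1, 2], [3, 4]]]            # the caller's list stays untouched
    x_train_0 = np.array(x_train[0])        # local copy of the first sample
    x_train__0_vec = x_train_0.ravel()      # flattened 1-D version of the copy
    x_train_size = len(x_train__0_vec)      # 4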
@@ -166,11 +169,11 @@ def fit(
 
 inp = np.array(inp)
 inp = inp.ravel()
-
+
 if x_train_size != len(inp):
 print(Fore.RED + "ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",
 infoPLAN + Style.RESET_ALL)
-return 'e'
+sys.exit()
 
 neural_layer = inp
 
@@ -341,7 +344,7 @@ def generate_fixed_positions(G, layout_type='circular'):
 pos[node] = (i % grid_size, i // grid_size)
 else:
 raise ValueError("Unsupported layout_type. Use 'circular' or 'grid'.")
-
+
 return pos
 
 def weight_normalization(
@@ -366,11 +369,8 @@ def weight_normalization(
 return W
 
 def weight_identification(
-layer_count, # int: Number of layers in the neural network.
-class_count, # int: Number of classes in the classification task.
 fex_neurons,
 cat_neurons, # list[num]: List of neuron counts for each layer.
-x_train_size # int: Size of the input data.
 ) -> str:
 """
 Identifies the weights for a neural network model.
@@ -576,15 +576,58 @@ def sine_square(x):
 def logarithmic(x):
 return np.log(x**2 + 1)
 
-def power(x, p):
-return x**p
-
 def scaled_cubic(x, alpha=1.0):
 return alpha * x**3
 
 def sine_offset(x, beta=0.0):
 return np.sin(x + beta)
 
+def activations_list():
+"""
+spiral,
+sigmoid,
+relu,
+tanh,: good for general datasets
+swish,
+circular,
+mod_circular,
+tanh_circular,
+leaky_relu,
+softplus,
+elu,
+gelu,
+selu,
+sinakt,
+p_squared,
+sglu,
+dlrelu,
+exsig,
+acos,
+gla,
+srelu,
+qelu,
+isra,
+waveakt,
+arctan,
+bent_identity,: good for image datasets
+sech,
+softsign,
+pwl,
+cubic,
+gaussian,
+sine,
+tanh_square,
+mod_sigmoid,
+quartic,
+square_quartic,
+cubic_quadratic,
+exp_cubic,
+sine_square,
+logarithmic,
+scaled_cubic,
+sine_offset
+"""
+
 
 def fex(
 Input, # list[num]: Input data.
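The new activations_list function is documentation only: its docstring enumerates every name the fex dispatcher below accepts, and the new ERROR120 message points users at it. A usage sketch, assuming the module is imported in the package's from plan import plan style:

    from plan import plan

    help(plan.activations_list)  # prints the docstring above, i.e. every accepted name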
@@ -619,135 +662,140 @@ def fex(
 if activation == 'sigmoid':
 Output += Sigmoid(Input)
 
-if activation == 'swish':
+elif activation == 'swish':
 Output += swish(Input)
 
-if activation == 'circular':
+elif activation == 'circular':
 Output += circular_activation(Input)
 
-if activation == 'mod_circular':
+elif activation == 'mod_circular':
 Output += modular_circular_activation(Input)
 
-if activation == 'tanh_circular':
+elif activation == 'tanh_circular':
 Output += tanh_circular_activation(Input)
 
-if activation == 'leaky_relu':
+elif activation == 'leaky_relu':
 Output += leaky_relu(Input)
 
-if activation == 'relu':
+elif activation == 'relu':
 Output += Relu(Input)
 
-if activation == 'softplus':
+elif activation == 'softplus':
 Output += softplus(Input)
 
-if activation == 'elu':
+elif activation == 'elu':
 Output += elu(Input)
 
-if activation == 'gelu':
+elif activation == 'gelu':
 Output += gelu(Input)
 
-if activation == 'selu':
+elif activation == 'selu':
 Output += selu(Input)
 
-if activation == 'softmax':
+elif activation == 'softmax':
 Output += Softmax(Input)
 
-if activation == 'tanh':
+elif activation == 'tanh':
 Output += tanh(Input)
 
-if activation == 'sinakt':
+elif activation == 'sinakt':
 Output += sinakt(Input)
 
-if activation == 'p_squared':
+elif activation == 'p_squared':
 Output += p_squared(Input)
 
-if activation == 'sglu':
+elif activation == 'sglu':
 Output += sglu(Input, alpha=1.0)
 
-if activation == 'dlrelu':
+elif activation == 'dlrelu':
 Output += dlrelu(Input)
 
-if activation == 'exsig':
+elif activation == 'exsig':
 Output += exsig(Input)
 
-if activation == 'acos':
+elif activation == 'acos':
 Output += acos(Input, alpha=1.0, beta=0.0)
 
-if activation == 'gla':
+elif activation == 'gla':
 Output += gla(Input, alpha=1.0, mu=0.0)
 
-if activation == 'srelu':
+elif activation == 'srelu':
 Output += srelu(Input)
 
-if activation == 'qelu':
+elif activation == 'qelu':
 Output += qelu(Input)
 
-if activation == 'isra':
+elif activation == 'isra':
 Output += isra(Input)
 
-if activation == 'waveakt':
+elif activation == 'waveakt':
 Output += waveakt(Input)
 
-if activation == 'arctan':
+elif activation == 'arctan':
 Output += arctan(Input)
 
-if activation == 'bent_identity':
+elif activation == 'bent_identity':
 Output += bent_identity(Input)
 
-if activation == 'sech':
+elif activation == 'sech':
 Output += sech(Input)
 
-if activation == 'softsign':
+elif activation == 'softsign':
 Output += softsign(Input)
 
-if activation == 'pwl':
+elif activation == 'pwl':
 Output += pwl(Input)
 
-if activation == 'cubic':
+elif activation == 'cubic':
 Output += cubic(Input)
 
-if activation == 'gaussian':
+elif activation == 'gaussian':
 Output += gaussian(Input)
 
-if activation == 'sine':
+elif activation == 'sine':
 Output += sine(Input)
 
-if activation == 'tanh_square':
+elif activation == 'tanh_square':
 Output += tanh_square(Input)
 
-if activation == 'mod_sigmoid':
+elif activation == 'mod_sigmoid':
 Output += mod_sigmoid(Input)
 
-if activation == None or activation == 'linear':
+elif activation == None or activation == 'linear':
 Output += Input
 
-if activation == 'quartic':
+elif activation == 'quartic':
 Output += quartic(Input)
 
-if activation == 'square_quartic':
+elif activation == 'square_quartic':
 Output += square_quartic(Input)
 
-if activation == 'cubic_quadratic':
+elif activation == 'cubic_quadratic':
 Output += cubic_quadratic(Input)
 
-if activation == 'exp_cubic':
+elif activation == 'exp_cubic':
 Output += exp_cubic(Input)
 
-if activation == 'sine_square':
+elif activation == 'sine_square':
 Output += sine_square(Input)
 
-if activation == 'logarithmic':
+elif activation == 'logarithmic':
 Output += logarithmic(Input)
 
-if activation == 'scaled_cubic':
+elif activation == 'scaled_cubic':
 Output += scaled_cubic(Input, 1.0)
 
-if activation == 'sine_offset':
+elif activation == 'sine_offset':
 Output += sine_offset(Input, 1.0)
 
-if activation == 'spiral':
+elif activation == 'spiral':
 Output += spiral_activation(Input)
 
+else:
+
+print(Fore.RED + 'ERROR120: ' + '"' + activation + '" is not available. Run help(plan.activations_list) for the list of available activation functions.' + Style.RESET_ALL)
+sys.exit()
+
 
 Input = Output
 
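Converting the independent if tests into one if/elif chain is what makes the new else branch possible: with separate ifs there is no single point where "no name matched" can be detected, so an unknown activation string previously fell through silently and left Output unchanged. A minimal sketch of the difference, with hypothetical names:

    import sys
    import math

    def fex_dispatch(activation, x):
        out = 0.0
        if activation == 'sigmoid':
            out += 1.0 / (1.0 + math.exp(-x))
        elif activation == 'linear':
            out += x
        else:
            print(f'"{activation}" is not available')  # reached only when nothing above matched
            sys.exit()
        return out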
@@ -805,13 +853,8 @@ def normalization(
 (num) Scaled input data after normalization.
 """
 
-AbsVector = np.abs(Input)
-
-MaxAbs = np.max(AbsVector)
-
-ScaledInput = Input / MaxAbs
-
-return ScaledInput
+MaxAbs = np.max(np.abs(Input)) # compute the maximum absolute value directly
+return Input / MaxAbs # return the normalized input
 
 
 def evaluate(
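For reference, the condensed normalization is plain max-abs scaling, which maps any input with a nonzero maximum magnitude into [-1, 1]. A standalone sketch (note that an all-zero input would still divide by zero; neither version guards that case):

    import numpy as np

    def normalization(x):
        max_abs = np.max(np.abs(x))  # largest magnitude anywhere in the input
        return x / max_abs           # scales the data into [-1, 1]

    print(normalization(np.array([-4.0, 2.0, 1.0])))  # [-1.    0.5   0.25]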
@@ -912,10 +955,11 @@ def evaluate(
 
 for i, w in enumerate(Wc):
 W[i] = np.copy(w)
-
+
 except:
 
 print(Fore.RED + 'ERROR:' + infoTestModel + Style.RESET_ALL)
+sys.exit()
 
 return W, y_preds, acc
 
@@ -1041,7 +1085,8 @@ def multiple_evaluate(
 except:
 
 print(Fore.RED + "ERROR: Testing model parameters like 'activation_potentiation' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
-return 'e'
+
+sys.exit()
 
 return W, y_preds, acc
 
@@ -1084,12 +1129,12 @@ def save_model(model_name,
 if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
 print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" +
 infosave_model + Style.RESET_ALL)
-return 'e'
+sys.exit()
 
 if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
 print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" +
 infosave_model + Style.RESET_ALL)
-return 'e'
+sys.exit()
 
 NeuronCount = 0
 SynapseCount = 0
@@ -1103,7 +1148,7 @@ def save_model(model_name,
 
 print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" +
 infosave_model + Style.RESET_ALL)
-return 'e'
+sys.exit()
 
 if scaler_params != None:
 
@@ -1141,7 +1186,7 @@ def save_model(model_name,
 
 print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" +
 infosave_model + Style.RESET_ALL)
-return 'e'
+sys.exit()
 
 try:
 
@@ -1200,7 +1245,7 @@ def save_model(model_name,
 except:
 
 print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
-return 'e'
+sys.exit()
 print(df)
 message = (
 Fore.GREEN + "Model Saved Successfully\n" +
1307
1352
  except:
1308
1353
  print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
1309
1354
  infopredict_model_ssd + Style.RESET_ALL)
1310
- return 'e'
1355
+ sys.exit()
1311
1356
  for i, w in enumerate(Wc):
1312
1357
  W[i] = np.copy(w)
1313
1358
  return neural_layer
@@ -1362,7 +1407,7 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=[Non
 except:
 print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
 infopredict_model_ram + Style.RESET_ALL)
-return 'e'
+sys.exit()
 
 def auto_balancer(x_train, y_train):
 
@@ -1409,7 +1454,7 @@ def auto_balancer(x_train, y_train):
 ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
 except:
 print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
-return 'e'
+sys.exit()
 
 return np.array(BalancedInputs), np.array(BalancedLabels)
 
@@ -1540,7 +1585,7 @@ def standard_scaler(x_train=None, x_test=None, scaler_params=None):
 print(
 Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler + Style.RESET_ALL)
 
-return('e')
+sys.exit()
 
 def encode_one_hot(y_train, y_test):
 """
@@ -1894,26 +1939,32 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation)
 np.arange(y_min, y_max, h))
 
 grid = np.c_[xx.ravel(), yy.ravel()]
-grid_full = np.zeros((grid.shape[0], x_test.shape[1]))
-grid_full[:, feature_indices] = grid
-
-Z = [None] * len(grid_full)
 
-predict_progress = tqdm(total=len(grid_full),leave=False, desc="Predicts For Desicion Boundary",ncols= 120)
+try:
 
-for i in range(len(grid_full)):
+grid_full = np.zeros((grid.shape[0], x_test.shape[1]))
+grid_full[:, feature_indices] = grid
+
+Z = [None] * len(grid_full)
 
-Z[i] = np.argmax(predict_model_ram(grid_full[i], W=W, activation_potentiation=activation_potentiation))
-predict_progress.update(1)
+predict_progress = tqdm(total=len(grid_full),leave=False, desc="Predicts For Decision Boundary",ncols= 120)
 
-Z = np.array(Z)
-Z = Z.reshape(xx.shape)
+for i in range(len(grid_full)):
+
+Z[i] = np.argmax(predict_model_ram(grid_full[i], W=W, activation_potentiation=activation_potentiation))
+predict_progress.update(1)
+
+Z = np.array(Z)
+Z = Z.reshape(xx.shape)
 
-axs[1,1].contourf(xx, yy, Z, alpha=0.8)
-axs[1,1].scatter(x_test[:, feature_indices[0]], x_test[:, feature_indices[1]], c=decode_one_hot(y_test), edgecolors='k', marker='o', s=20, alpha=0.9)
-axs[1,1].set_xlabel(f'Feature {0 + 1}')
-axs[1,1].set_ylabel(f'Feature {1 + 1}')
-axs[1,1].set_title('Decision Boundary')
+axs[1,1].contourf(xx, yy, Z, alpha=0.8)
+axs[1,1].scatter(x_test[:, feature_indices[0]], x_test[:, feature_indices[1]], c=decode_one_hot(y_test), edgecolors='k', marker='o', s=20, alpha=0.9)
+axs[1,1].set_xlabel(f'Feature {0 + 1}')
+axs[1,1].set_ylabel(f'Feature {1 + 1}')
+axs[1,1].set_title('Decision Boundary')
+
+except:
+pass
 
 plt.show()
 
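Besides the tqdm description typo fix ("Desicion" → "Decision"), the whole decision-boundary pass is now wrapped in a bare try/except, so plotting failures (e.g. test data without two usable features) no longer abort evaluation, at the cost of swallowing the error silently. The underlying meshgrid-prediction pattern, sketched with a stand-in predictor in place of predict_model_ram:

    import numpy as np

    def predict(point):
        return int(point.sum() > 0)  # hypothetical two-class stand-in predictor

    xx, yy = np.meshgrid(np.arange(-1, 1, 0.1), np.arange(-1, 1, 0.1))
    grid = np.c_[xx.ravel(), yy.ravel()]      # one row per grid point
    Z = np.array([predict(p) for p in grid])  # predicted class for every point
    Z = Z.reshape(xx.shape)                   # back to 2-D, ready for plt.contourf(xx, yy, Z)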
@@ -2052,8 +2103,8 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
 x_train = np.array(x_train)
 y_train = np.array(y_train)
 except:
-print(Fore.GREEN + "x_tarin and y_train already numpyarray." + Style.RESET_ALL)
 pass
+
 classes = np.arange(y_train.shape[1])
 class_count = len(classes)
 
pyerualjetwork-3.3.1.dist-info/METADATA → pyerualjetwork-3.3.3.dist-info/METADATA CHANGED
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 3.3.1
-Summary: fit function changes: LTD parameter included for Deep PLAN and professional visualizing for training. in the fit funciton, show_training=True
+Version: 3.3.3
+Summary: Code improvements
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classifcation,potentiation learning artficial neural networks
pyerualjetwork-3.3.3.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
+plan/plan.py,sha256=Wf9sM9N_ibeaoiFKQGs940ysmQcb7ORLryw5I0oqT7Y,68803
+pyerualjetwork-3.3.3.dist-info/METADATA,sha256=PkJreUZLFg-dTz4-kxLeFLl-XH6wclatKeejotkjQws,244
+pyerualjetwork-3.3.3.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-3.3.3.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
+pyerualjetwork-3.3.3.dist-info/RECORD,,
pyerualjetwork-3.3.1.dist-info/RECORD DELETED
@@ -1,6 +0,0 @@
-plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
-plan/plan.py,sha256=39AW4YIsz6htzmlVGLLUab1wpSQw6u2PeE9I488tSx4,67925
-pyerualjetwork-3.3.1.dist-info/METADATA,sha256=0QhZbV-1i7-9ynrRwLTEP6MaWzBzD9yc2uIZeMFkj5I,368
-pyerualjetwork-3.3.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-3.3.1.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
-pyerualjetwork-3.3.1.dist-info/RECORD,,