pyerualjetwork 3.3.1__py3-none-any.whl → 3.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plan/plan.py CHANGED
@@ -20,6 +20,7 @@ from scipy import io
20
20
  import scipy.io as sio
21
21
  from matplotlib.animation import ArtistAnimation
22
22
  import networkx as nx
23
+ import sys
23
24
 
24
25
  # BUILD -----
25
26
 
@@ -66,7 +67,7 @@ def fit(
66
67
  if len(x_train) != len(y_train):
67
68
 
68
69
  print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN + Style.RESET_ALL)
69
- return 'e'
70
+ sys.exit()
70
71
 
71
72
  if val == True:
72
73
 
@@ -105,7 +106,7 @@ def fit(
105
106
 
106
107
  print(Fore.RED + "ERROR115: For showing training, val parameter must be True. from: fit",
107
108
  infoPLAN + Style.RESET_ALL)
108
- return 'e'
109
+ sys.exit()
109
110
 
110
111
 
111
112
  class_count = set()
@@ -170,7 +171,7 @@ def fit(
170
171
  if x_train_size != len(inp):
171
172
  print(Fore.RED + "ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",
172
173
  infoPLAN + Style.RESET_ALL)
173
- return 'e'
174
+ sys.exit()
174
175
 
175
176
  neural_layer = inp
176
177
 
@@ -341,7 +342,7 @@ def generate_fixed_positions(G, layout_type='circular'):
341
342
  pos[node] = (i % grid_size, i // grid_size)
342
343
  else:
343
344
  raise ValueError("Unsupported layout_type. Use 'circular' or 'grid'.")
344
-
345
+
345
346
  return pos
346
347
 
347
348
  def weight_normalization(
@@ -366,11 +367,8 @@ def weight_normalization(
366
367
  return W
367
368
 
368
369
  def weight_identification(
369
- layer_count, # int: Number of layers in the neural network.
370
- class_count, # int: Number of classes in the classification task.
371
370
  fex_neurons,
372
371
  cat_neurons, # list[num]: List of neuron counts for each layer.
373
- x_train_size # int: Size of the input data.
374
372
  ) -> str:
375
373
  """
376
374
  Identifies the weights for a neural network model.
@@ -576,15 +574,58 @@ def sine_square(x):
576
574
  def logarithmic(x):
577
575
  return np.log(x**2 + 1)
578
576
 
579
- def power(x, p):
580
- return x**p
581
-
582
577
  def scaled_cubic(x, alpha=1.0):
583
578
  return alpha * x**3
584
579
 
585
580
  def sine_offset(x, beta=0.0):
586
581
  return np.sin(x + beta)
587
582
 
583
+ def activations_list():
584
+ """
585
+ spiral,
586
+ sigmoid,
587
+ relu,
588
+ tanh,
589
+ swish,
590
+ circular,
591
+ mod_circular,
592
+ tanh_circular,
593
+ leaky_relu,
594
+ softplus,
595
+ elu,
596
+ gelu,
597
+ selu,
598
+ sinakt,
599
+ p_squared,
600
+ sglu,
601
+ dlrelu,
602
+ exsig,
603
+ acos,
604
+ gla,
605
+ srelu,
606
+ qelu,
607
+ isra,
608
+ waveakt,
609
+ arctan,
610
+ bent_identity,
611
+ sech,
612
+ softsign,
613
+ pwl,
614
+ cubic,
615
+ gaussian,
616
+ sine,
617
+ tanh_square,
618
+ mod_sigmoid,
619
+ quartic,
620
+ square_quartic,
621
+ cubic_quadratic,
622
+ exp_cubic,
623
+ sine_square,
624
+ logarithmic,
625
+ scaled_cubic,
626
+ sine_offset
627
+ """
628
+
588
629
 
589
630
  def fex(
590
631
  Input, # list[num]: Input data.
@@ -619,135 +660,140 @@ def fex(
619
660
  if activation == 'sigmoid':
620
661
  Output += Sigmoid(Input)
621
662
 
622
- if activation == 'swish':
663
+ elif activation == 'swish':
623
664
  Output += swish(Input)
624
665
 
625
- if activation == 'circular':
666
+ elif activation == 'circular':
626
667
  Output += circular_activation(Input)
627
668
 
628
- if activation == 'mod_circular':
669
+ elif activation == 'mod_circular':
629
670
  Output += modular_circular_activation(Input)
630
671
 
631
- if activation == 'tanh_circular':
672
+ elif activation == 'tanh_circular':
632
673
  Output += tanh_circular_activation(Input)
633
674
 
634
- if activation == 'leaky_relu':
675
+ elif activation == 'leaky_relu':
635
676
  Output += leaky_relu(Input)
636
677
 
637
- if activation == 'relu':
678
+ elif activation == 'relu':
638
679
  Output += Relu(Input)
639
680
 
640
- if activation == 'softplus':
681
+ elif activation == 'softplus':
641
682
  Output += softplus(Input)
642
683
 
643
- if activation == 'elu':
684
+ elif activation == 'elu':
644
685
  Output += elu(Input)
645
686
 
646
- if activation == 'gelu':
687
+ elif activation == 'gelu':
647
688
  Output += gelu(Input)
648
689
 
649
- if activation == 'selu':
690
+ elif activation == 'selu':
650
691
  Output += selu(Input)
651
692
 
652
- if activation == 'softmax':
693
+ elif activation == 'softmax':
653
694
  Output += Softmax(Input)
654
695
 
655
- if activation == 'tanh':
696
+ elif activation == 'tanh':
656
697
  Output += tanh(Input)
657
698
 
658
- if activation == 'sinakt':
699
+ elif activation == 'sinakt':
659
700
  Output += sinakt(Input)
660
701
 
661
- if activation == 'p_squared':
702
+ elif activation == 'p_squared':
662
703
  Output += p_squared(Input)
663
704
 
664
- if activation == 'sglu':
705
+ elif activation == 'sglu':
665
706
  Output += sglu(Input, alpha=1.0)
666
707
 
667
- if activation == 'dlrelu':
708
+ elif activation == 'dlrelu':
668
709
  Output += dlrelu(Input)
669
710
 
670
- if activation == 'exsig':
711
+ elif activation == 'exsig':
671
712
  Output += exsig(Input)
672
713
 
673
- if activation == 'acos':
714
+ elif activation == 'acos':
674
715
  Output += acos(Input, alpha=1.0, beta=0.0)
675
716
 
676
- if activation == 'gla':
717
+ elif activation == 'gla':
677
718
  Output += gla(Input, alpha=1.0, mu=0.0)
678
719
 
679
- if activation == 'srelu':
720
+ elif activation == 'srelu':
680
721
  Output += srelu(Input)
681
722
 
682
- if activation == 'qelu':
723
+ elif activation == 'qelu':
683
724
  Output += qelu(Input)
684
725
 
685
- if activation == 'isra':
726
+ elif activation == 'isra':
686
727
  Output += isra(Input)
687
728
 
688
- if activation == 'waveakt':
729
+ elif activation == 'waveakt':
689
730
  Output += waveakt(Input)
690
731
 
691
- if activation == 'arctan':
732
+ elif activation == 'arctan':
692
733
  Output += arctan(Input)
693
734
 
694
- if activation == 'bent_identity':
735
+ elif activation == 'bent_identity':
695
736
  Output += bent_identity(Input)
696
737
 
697
- if activation == 'sech':
738
+ elif activation == 'sech':
698
739
  Output += sech(Input)
699
740
 
700
- if activation == 'softsign':
741
+ elif activation == 'softsign':
701
742
  Output += softsign(Input)
702
743
 
703
- if activation == 'pwl':
744
+ elif activation == 'pwl':
704
745
  Output += pwl(Input)
705
746
 
706
- if activation == 'cubic':
747
+ elif activation == 'cubic':
707
748
  Output += cubic(Input)
708
749
 
709
- if activation == 'gaussian':
750
+ elif activation == 'gaussian':
710
751
  Output += gaussian(Input)
711
752
 
712
- if activation == 'sine':
753
+ elif activation == 'sine':
713
754
  Output += sine(Input)
714
755
 
715
- if activation == 'tanh_square':
756
+ elif activation == 'tanh_square':
716
757
  Output += tanh_square(Input)
717
758
 
718
- if activation == 'mod_sigmoid':
759
+ elif activation == 'mod_sigmoid':
719
760
  Output += mod_sigmoid(Input)
720
761
 
721
- if activation == None or activation == 'linear':
762
+ elif activation == None or activation == 'linear':
722
763
  Output += Input
723
764
 
724
- if activation == 'quartic':
765
+ elif activation == 'quartic':
725
766
  Output += quartic(Input)
726
767
 
727
- if activation == 'square_quartic':
768
+ elif activation == 'square_quartic':
728
769
  Output += square_quartic(Input)
729
770
 
730
- if activation == 'cubic_quadratic':
771
+ elif activation == 'cubic_quadratic':
731
772
  Output += cubic_quadratic(Input)
732
773
 
733
- if activation == 'exp_cubic':
774
+ elif activation == 'exp_cubic':
734
775
  Output += exp_cubic(Input)
735
776
 
736
- if activation == 'sine_square':
777
+ elif activation == 'sine_square':
737
778
  Output += sine_square(Input)
738
779
 
739
- if activation == 'logarithmic':
780
+ elif activation == 'logarithmic':
740
781
  Output += logarithmic(Input)
741
782
 
742
- if activation == 'scaled_cubic':
783
+ elif activation == 'scaled_cubic':
743
784
  Output += scaled_cubic(Input, 1.0)
744
785
 
745
- if activation == 'sine_offset':
786
+ elif activation == 'sine_offset':
746
787
  Output += sine_offset(Input, 1.0)
747
788
 
748
- if activation == 'spiral':
789
+ elif activation == 'spiral':
749
790
  Output += spiral_activation(Input)
750
791
 
792
+ else:
793
+
794
+ print(Fore.RED + 'ERROR120: ' + '"' + activation + '" is not available. Please run this code for the available activation function list: help(plan.activations_list)' + Style.RESET_ALL)
795
+ sys.exit()
796
+
751
797
 
752
798
  Input = Output
753
799
 
@@ -916,6 +962,7 @@ def evaluate(
916
962
  except:
917
963
 
918
964
  print(Fore.RED + 'ERROR:' + infoTestModel + Style.RESET_ALL)
965
+ sys.exit()
919
966
 
920
967
  return W, y_preds, acc
921
968
 
@@ -1041,7 +1088,8 @@ def multiple_evaluate(
1041
1088
  except:
1042
1089
 
1043
1090
  print(Fore.RED + "ERROR: Testing model parameters like 'activation_potentiation' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
1044
- return 'e'
1091
+
1092
+ sys.exit()
1045
1093
 
1046
1094
  return W, y_preds, acc
1047
1095
 
@@ -1084,12 +1132,12 @@ def save_model(model_name,
1084
1132
  if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
1085
1133
  print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" +
1086
1134
  infosave_model + Style.RESET_ALL)
1087
- return 'e'
1135
+ sys.exit()
1088
1136
 
1089
1137
  if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
1090
1138
  print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" +
1091
1139
  infosave_model + Style.RESET_ALL)
1092
- return 'e'
1140
+ sys.exit()
1093
1141
 
1094
1142
  NeuronCount = 0
1095
1143
  SynapseCount = 0
@@ -1103,7 +1151,7 @@ def save_model(model_name,
1103
1151
 
1104
1152
  print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" +
1105
1153
  infosave_model + Style.RESET_ALL)
1106
- return 'e'
1154
+ sys.exit()
1107
1155
 
1108
1156
  if scaler_params != None:
1109
1157
 
@@ -1141,7 +1189,7 @@ def save_model(model_name,
1141
1189
 
1142
1190
  print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" +
1143
1191
  infosave_model + Style.RESET_ALL)
1144
- return 'e'
1192
+ sys.exit()
1145
1193
 
1146
1194
  try:
1147
1195
 
@@ -1200,7 +1248,7 @@ def save_model(model_name,
1200
1248
  except:
1201
1249
 
1202
1250
  print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
1203
- return 'e'
1251
+ sys.exit()
1204
1252
  print(df)
1205
1253
  message = (
1206
1254
  Fore.GREEN + "Model Saved Successfully\n" +
@@ -1307,7 +1355,7 @@ def predict_model_ssd(Input, model_name, model_path):
1307
1355
  except:
1308
1356
  print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
1309
1357
  infopredict_model_ssd + Style.RESET_ALL)
1310
- return 'e'
1358
+ sys.exit()
1311
1359
  for i, w in enumerate(Wc):
1312
1360
  W[i] = np.copy(w)
1313
1361
  return neural_layer
@@ -1362,7 +1410,7 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=[Non
1362
1410
  except:
1363
1411
  print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
1364
1412
  infopredict_model_ram + Style.RESET_ALL)
1365
- return 'e'
1413
+ sys.exit()
1366
1414
 
1367
1415
  def auto_balancer(x_train, y_train):
1368
1416
 
@@ -1409,7 +1457,7 @@ def auto_balancer(x_train, y_train):
1409
1457
  ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
1410
1458
  except:
1411
1459
  print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
1412
- return 'e'
1460
+ sys.exit()
1413
1461
 
1414
1462
  return np.array(BalancedInputs), np.array(BalancedLabels)
1415
1463
 
@@ -1540,7 +1588,7 @@ def standard_scaler(x_train=None, x_test=None, scaler_params=None):
1540
1588
  print(
1541
1589
  Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler + Style.RESET_ALL)
1542
1590
 
1543
- return('e')
1591
+ sys.exit()
1544
1592
 
1545
1593
  def encode_one_hot(y_train, y_test):
1546
1594
  """
@@ -1899,7 +1947,7 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation)
1899
1947
 
1900
1948
  Z = [None] * len(grid_full)
1901
1949
 
1902
- predict_progress = tqdm(total=len(grid_full),leave=False, desc="Predicts For Desicion Boundary",ncols= 120)
1950
+ predict_progress = tqdm(total=len(grid_full),leave=False, desc="Predicts For Decision Boundary",ncols= 120)
1903
1951
 
1904
1952
  for i in range(len(grid_full)):
1905
1953
 
@@ -2052,8 +2100,8 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
2052
2100
  x_train = np.array(x_train)
2053
2101
  y_train = np.array(y_train)
2054
2102
  except:
2055
- print(Fore.GREEN + "x_tarin and y_train already numpyarray." + Style.RESET_ALL)
2056
2103
  pass
2104
+
2057
2105
  classes = np.arange(y_train.shape[1])
2058
2106
  class_count = len(classes)
2059
2107
 
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: pyerualjetwork
3
- Version: 3.3.1
4
- Summary: fit function changes: LTD parameter included for Deep PLAN and professional visualizing for training. in the fit funciton, show_training=True
3
+ Version: 3.3.2
4
+ Summary: Code improvements
5
5
  Author: Hasan Can Beydili
6
6
  Author-email: tchasancan@gmail.com
7
7
  Keywords: model evaluation,classifcation,potentiation learning artficial neural networks
@@ -0,0 +1,6 @@
1
+ plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
2
+ plan/plan.py,sha256=ZNyDnEO12v9AGYxJe691kT9NICb-NmC5AfGqnZStzRQ,68651
3
+ pyerualjetwork-3.3.2.dist-info/METADATA,sha256=GrHAKJ14Qf8vKcDtPoJL3lvD0q_c-kVFUdpt9gxakGQ,244
4
+ pyerualjetwork-3.3.2.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
5
+ pyerualjetwork-3.3.2.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
6
+ pyerualjetwork-3.3.2.dist-info/RECORD,,
@@ -1,6 +0,0 @@
1
- plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
2
- plan/plan.py,sha256=39AW4YIsz6htzmlVGLLUab1wpSQw6u2PeE9I488tSx4,67925
3
- pyerualjetwork-3.3.1.dist-info/METADATA,sha256=0QhZbV-1i7-9ynrRwLTEP6MaWzBzD9yc2uIZeMFkj5I,368
4
- pyerualjetwork-3.3.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
5
- pyerualjetwork-3.3.1.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
6
- pyerualjetwork-3.3.1.dist-info/RECORD,,