pyerualjetwork 2.4.8__py3-none-any.whl → 2.4.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plan/__init__.py ADDED
@@ -0,0 +1,5 @@
+# pyerualjetwork/PLAN/__init__.py
+
+# This file is the main entry point of the plan module.
+
+from .plan import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer, weight_normalization
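The new entry point re-exports the whole public API from plan.plan, so one import replaces the per-variant modules. A minimal consumer sketch (assumes pyerualjetwork 2.4.9 is installed):

import plan  # replaces the former 'from plan_bi import ...' / 'from plan_di import ...'

print(plan.fit)  # every name in the re-export list above is reachable as plan.<name>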
plan_di/plan_di.py → plan/plan.py RENAMED
@@ -20,9 +20,9 @@ import seaborn as sns
 
 def fit(
     x_train: List[Union[int, float]],
-    # At least two.. and one hot encoded
     y_train: List[Union[int, float, str]], # At least two.. and one hot encoded
-    show_training
+    show_training,
+    activation_potential=None # (float): Input activation potential (optional)
 ) -> str:
 
     infoPLAN = """
@@ -32,11 +32,18 @@ def fit(
     x_train (list[num]): List of input data.
     y_train (list[num]): List of y_train. (one hot encoded)
     show_training (bool, str): True, None or 'final'
-
+    activation_potential (float): Input activation potential (optional)
     Returns:
     list([num]): (Weight matrices list, train_predictions list, Train_acc).
     error handled ?: Process status ('e')
     """
+
+    if activation_potential != None:
+
+        if activation_potential < 0 or activation_potential > 1:
+
+            print(Fore.RED + "ERROR101: ACTIVATION potential value must be in range 0-1. from: fit",infoPLAN)
+            return 'e'
 
     if len(x_train) != len(y_train):
         print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
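Together, the two hunks above give fit an optional activation_potential and reject values outside [0, 1]. A minimal usage sketch with hypothetical toy data (labels one-hot encoded, as the docstring requires):

import numpy as np
import plan

x_train = [np.array([0.1, 0.9]), np.array([0.8, 0.2]),
           np.array([0.2, 0.7]), np.array([0.9, 0.1])]
y_train = [np.array([1, 0]), np.array([0, 1]),
           np.array([1, 0]), np.array([0, 1])]

W = plan.fit(x_train, y_train, None, activation_potential=0.5)  # show_training=None
# plan.fit(x_train, y_train, None, activation_potential=1.5) would print ERROR101
# and return 'e'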
@@ -83,14 +90,12 @@ def fit(
             neural_layer = normalization(neural_layer)
 
             if Layer == 'fex':
-                W[Lindex] = fex(neural_layer, W[Lindex], True, y[index])
+                W[Lindex] = fex(neural_layer, W[Lindex], True, y[index], activation_potential)
 
         for i, w in enumerate(W):
             trained_W[i] = trained_W[i] + w
 
-        if show_training == True:
-
-            fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
+        if show_training == True or 'final':
 
             try:
                 row = x_train[1].shape[0]
@@ -103,25 +108,29 @@ def fit(
             try:
                 row, col = find_numbers(len(x_train[0]))
 
-            except:
+            except:
 
                 print(Fore.RED + 'ERROR: Change show_training to None. Input length cannot be reshaped', infoPLAN + Style.RESET_ALL)
                 return 'e'
 
 
-            for j in range(len(class_count)):
-
-                mat = trained_W[0][j,:].reshape(row, col)
-
-                ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
-                ax[j].set_aspect('equal')
-
-                ax[j].set_xticks([])
-                ax[j].set_yticks([])
-                ax[j].set_title(f'{j+1}. Neuron')
-
-
-            plt.show()
+            if show_training == True:
+
+                fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
+
+                for j in range(len(class_count)):
+
+                    mat = trained_W[0][j,:].reshape(row, col)
+
+                    ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+                    ax[j].set_aspect('equal')
+
+                    ax[j].set_xticks([])
+                    ax[j].set_yticks([])
+                    ax[j].set_title(f'{j+1}. Neuron')
+
+
+                plt.show()
 
         W = weight_identification(
             len(layers) - 1, len(class_count), neurons, x_train_size)
@@ -154,14 +163,9 @@ def fit(
         except:
 
             print(Fore.MAGENTA + 'WARNING: You try train showing but inputs is raveled. x_train inputs should be reshaped for training_show.', infoPLAN + Style.RESET_ALL)
-
-            try:
-                row, col = find_numbers(len(x_train[0]))
-
-            except:
-                print(Fore.RED + 'ERROR: Change show_training to None. Input length cannot be reshaped', infoPLAN + Style.RESET_ALL)
-                return 'e'
-
+
+            row, col = find_numbers(len(x_train[0]))
+
         for j in range(len(class_count)):
 
             mat = trained_W[0][j,:].reshape(row, col)
@@ -266,7 +270,8 @@ def fex(
     Input, # list[num]: Input data.
     w, # num: Weight matrix of the neural network.
     is_training, # bool: Flag indicating if the function is called during training (True or False).
-    Class # int: Which class is, if training.
+    Class, # int: Which class is, if training.
+    activation_potential # float or None: Input activation potential (optional)
 ) -> tuple:
     """
     Applies feature extraction process to the input data using synaptic pruning.
@@ -276,22 +281,42 @@ def fex(
     w (num): Weight matrix of the neural network.
     is_training (bool): Flag indicating if the function is called during training (True or False).
     Class (int): if is during training then which class(label) ? is isnt then put None.
+    activation_potential (float or None): Threshold value for comparison. (optional)
 
     Returns:
     tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
     """
 
-    if is_training == True:
-
+    if is_training == True and activation_potential == None:
+
         w[Class, :] = Input
 
         return w
 
-    else:
-
-        neural_layer = np.dot(w, Input)
-
-        return neural_layer
+    elif is_training == True and activation_potential != None:
+
+
+        Input[Input < activation_potential] = 0
+        Input[Input > activation_potential] = 1
+
+        w[Class,:] = Input
+
+        return w
+
+    elif is_training == False and activation_potential == None:
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer
+
+    elif is_training == False and activation_potential != None:
+
+        Input[Input < activation_potential] = 0
+        Input[Input > activation_potential] = 1
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer
 
 
 
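The four branches above differ only in whether the input is binarized first: with an activation_potential set, values below the threshold drop to 0 and values above it snap to 1 before the usual write-to-row (training) or np.dot (inference) step. A standalone sketch of that thresholding, outside the library:

import numpy as np

Input = np.array([0.2, 0.5, 0.8])
activation_potential = 0.5

# the same two masked assignments fex applies (note they mutate Input in place)
Input[Input < activation_potential] = 0
Input[Input > activation_potential] = 1

print(Input)  # [0.  0.5 1. ] -- a value exactly at the threshold is left unchanged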
@@ -368,7 +393,8 @@ def evaluate(
     x_test, # list[num]: Test input data.
     y_test, # list[num]: Test labels.
     show_metrices, # show_metrices (bool): (True or False)
-    W # list[num]: Weight matrix list of the neural network.
+    W, # list[num]: Weight matrix list of the neural network.
+    activation_potential=None # activation_potential (float or None): Threshold value for comparison. (optional)
 ) -> tuple:
     infoTestModel = """
     Tests the neural network model with the given test data.
@@ -378,6 +404,7 @@ def evaluate(
     y_test (list[num]): Test labels.
     show_metrices (bool): (True or False)
     W (list[num]): Weight matrix list of the neural network.
+    activation_potential (float or None): Threshold value for comparison. (optional)
 
     Returns:
     tuple: A tuple containing the predicted labels and the accuracy of the model.
@@ -408,7 +435,7 @@ def evaluate(
             neural_layer = normalization(neural_layer)
 
             if Layer == 'fex':
-                neural_layer = fex(neural_layer, W[index], False, None)
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
             elif Layer == 'cat':
                 neural_layer = np.dot(W[index], neural_layer)
 
@@ -483,7 +510,8 @@ def multiple_evaluate(
     x_test, # list[num]: Test input data.
     y_test, # list[num]: Test labels.
     show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
-    MW # list[list[num]]: Weight matrix of the neural network.
+    MW, # list[list[num]]: Weight matrix of the neural network.
+    activation_potential=None # (float or None): Threshold value for comparison. (optional)
 ) -> tuple:
     infoTestModel = """
     Tests the neural network model with the given test data.
@@ -530,7 +558,7 @@ def multiple_evaluate(
             neural_layer = normalization(neural_layer)
 
             if Layer == 'fex':
-                neural_layer = fex(neural_layer, W[index], False, None)
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
             elif Layer == 'cat':
                 neural_layer = np.dot(W[index], neural_layer)
 
@@ -614,7 +642,8 @@ def save_model(model_name,
                weights_format,
                model_path,
                scaler_params,
-               W
+               W,
+               activation_potential=None
                ):
 
     infosave_model = """
@@ -630,6 +659,7 @@ def save_model(model_name,
     model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
     scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
     W: Weights of the model.
+    activation_potential (float or None): Threshold value for comparison. (optional)
 
     Returns:
     str: Message indicating if the model was saved successfully or encountered an error.
@@ -678,7 +708,8 @@ def save_model(model_name,
            'WEIGHTS TYPE': weights_type,
            'WEIGHTS FORMAT': weights_format,
            'MODEL PATH': model_path,
-           'STANDARD SCALER': scaler_params
+           'STANDARD SCALER': scaler_params,
+           'ACTIVATION POTENTIAL': activation_potential
            }
     try:
 
@@ -819,14 +850,18 @@ def predict_model_ssd(Input, model_name, model_path):
     Arguments:
     Input (list or ndarray): Input data for the model (single vector or single matrix).
     model_name (str): Name of the model.
-    model_path (str): Path where the model is saved.
     Returns:
     ndarray: Output from the model.
     """
     W, df = load_model(model_name, model_path)
 
     scaler_params = str(df['STANDARD SCALER'].iloc[0])
-
+    activation_potential = str(df['ACTIVATION POTENTIAL'].iloc[0])
+
+    if activation_potential != 'None':
+
+        activation_potential = float(activation_potential)
+
     if scaler_params != None:
 
         Input = standard_scaler(None, Input, scaler_params)
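With 'ACTIVATION POTENTIAL' now written by save_model (see the dict hunk above), predict_model_ssd can recover the threshold on load. A sketch of just that recovery logic, mirrored outside the library with a hypothetical one-row DataFrame standing in for the loaded metadata:

import pandas as pd

df = pd.DataFrame([{'STANDARD SCALER': None, 'ACTIVATION POTENTIAL': 0.5}])

activation_potential = str(df['ACTIVATION POTENTIAL'].iloc[0])  # '0.5'

if activation_potential != 'None':
    activation_potential = float(activation_potential)  # back to 0.5

# a model saved without a threshold reads back as the string 'None' and is
# passed through unchanged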
@@ -845,7 +880,7 @@ def predict_model_ssd(Input, model_name, model_path):
             neural_layer = normalization(neural_layer)
 
             if Layer == 'fex':
-                neural_layer = fex(neural_layer, W[index], False, None)
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
             elif Layer == 'cat':
                 neural_layer = np.dot(W[index], neural_layer)
     except:
@@ -857,7 +892,7 @@ def predict_model_ssd(Input, model_name, model_path):
     return neural_layer
 
 
-def predict_model_ram(Input, scaler_params, W):
+def predict_model_ram(Input, scaler_params, W, activation_potential=None):
 
     infopredict_model_ram = """
     Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
@@ -867,6 +902,7 @@ def predict_model_ram(Input, scaler_params, W):
     Input (list or ndarray): Input data for the model (single vector or single matrix).
     scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
     W (list of ndarrays): Weights of the model.
+    activation_potential (float or None): Threshold value for comparison. (optional)
 
     Returns:
     ndarray: Output from the model.
@@ -890,7 +926,7 @@ def predict_model_ram(Input, scaler_params, W):
             neural_layer = normalization(neural_layer)
 
             if Layer == 'fex':
-                neural_layer = fex(neural_layer, W[index], False, None)
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
             elif Layer == 'cat':
                 neural_layer = np.dot(W[index], neural_layer)
 
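predict_model_ram takes the threshold directly, since the weights never leave memory. Continuing the hypothetical fit sketch:

sample = np.array([0.15, 0.85])
out = plan.predict_model_ram(sample, scaler_params=None, W=W,
                             activation_potential=0.5)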
pyerualjetwork-2.4.8.dist-info/METADATA → pyerualjetwork-2.4.9.dist-info/METADATA RENAMED
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.4.8
-Summary: show_training parameter added for fit function[True, 'final' or any]. Code improvements
+Version: 2.4.9
+Summary: plan_di and plan_bi merged to 'plan'. use 'import plan
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classifcation,pruning learning artficial neural networks
pyerualjetwork-2.4.9.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+plan/__init__.py,sha256=gmaz8lnQfl18MbOQwabBUPmShajK5S99jfyY-hQe8tc,502
+plan/plan.py,sha256=vJRHBcDUPJNAE_5PGQBu0z7CiFFDXO7Cal3udtUIUAo,52965
+pyerualjetwork-2.4.9.dist-info/METADATA,sha256=GS9lnkixHFWcYP1L2bxp_LaOTcEC0tFeeiEo9mXB_20,276
+pyerualjetwork-2.4.9.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-2.4.9.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
+pyerualjetwork-2.4.9.dist-info/RECORD,,
pyerualjetwork-2.4.9.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+plan
plan_bi/__init__.py DELETED
@@ -1,5 +0,0 @@
-# pyerualjetwork/PLAN/__init__.py
-
-# This file is the main entry point of the plan module.
-
-from .plan_bi import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, get_pot, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer, weight_normalization