pyerualjetwork 2.1.1__tar.gz → 2.1.2__tar.gz

This diff shows the changes between two publicly released versions of the package, exactly as they appear in the public registry. It is provided for informational purposes only.
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.1.1
-Summary: Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
+Version: 2.1.2
+Summary: new 'multiple_evaluate' function added. And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classifcation,pruning learning artficial neural networks
@@ -2,4 +2,4 @@
 
 # This file is the main entry point of the plan module.
 
-from .plan_bi import auto_balancer, normalization, Softmax, Sigmoid, Relu, synaptic_pruning, synaptic_dividing, weight_identification, fex, cat, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler
+from .plan_bi import auto_balancer, normalization, Softmax, Sigmoid, Relu, synaptic_pruning, synaptic_dividing, weight_identification, fex, cat, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, get_pot, synthetic_augmentation, standard_scaler, multiple_evaluate
@@ -62,8 +62,6 @@ def fit(
     Divides, Piece = synaptic_dividing(len(class_count),W)
     trained_W = [1] * len(W)
     print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL,)
-    train_predictions = [None] * len(y_train)
-    true = 0
    start_time = time.time()
    for index, inp in enumerate(x_train):
        uni_start_time = time.time()
@@ -86,27 +84,20 @@ def fit(
                 cs = Divides[int(k)][Windex][0]
 
 
-                W[Windex] = synaptic_pruning(w, cs, 'row', int(k),len(class_count),Piece[Windex],1)
+                W[Windex] = synaptic_pruning(w, cs, 'row', int(k), len(class_count), Piece[Windex], True)
 
         neural_layer = inp
 
         for Lindex, Layer in enumerate(layers):
 
-
+            y = np.argmax(y_train[index])
             neural_layer = normalization(neural_layer)
-
 
             if Layer == 'fex':
-                neural_layer,W[Lindex] = fex(neural_layer, W[Lindex], activation_potential, Piece[Windex], 1)
+                W[Lindex] = fex(neural_layer, W[Lindex], activation_potential, Piece[Windex], True, y)
             elif Layer == 'cat':
-                neural_layer,W[Lindex] = cat(neural_layer, W[Lindex], activation_potential, 1, Piece[Windex])
+                W[Lindex] = cat(neural_layer, W[Lindex], True, y)
 
-        RealOutput = np.argmax(y_train[index])
-        PredictedOutput = np.argmax(neural_layer)
-        if RealOutput == PredictedOutput:
-            true += 1
-        acc = true / len(y_train)
-        train_predictions[index] = PredictedOutput
 
        for i, w in enumerate(W):
            trained_W[i] = trained_W[i] + w
@@ -121,15 +112,16 @@ def fit(
 
        if calculating_est < 60:
            print('\rest......(sec):',calculating_est,'\n',end= "")
-            print('\rTrain accuracy: ' ,acc ,"\n", end="")
-
+
        elif calculating_est > 60 and calculating_est < 3600:
            print('\rest......(min):',calculating_est/60,'\n',end= "")
-            print('\rTrain accuracy: ' ,acc ,"\n", end="")
-
+
        elif calculating_est > 3600:
            print('\rest......(h):',calculating_est/3600,'\n',end= "")
-            print('\rTrain accuracy: ' ,acc ,"\n", end="")
+
+        print('\rTraining: ' , index, "/", len(x_train),"\n", end="")
+
+
 
    EndTime = time.time()
 
@@ -145,20 +137,11 @@ def fit(
 
    elif calculating_est > 3600:
        print('Total training time(h): ',calculating_est/3600)
-
-    if acc > 0.8:
-        print(Fore.GREEN + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
-
-    elif acc < 0.8 and acc > 0.6:
-        print(Fore.MAGENTA + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
-
-    elif acc < 0.6:
-        print(Fore.RED+ '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
 
 
 
 
-    return trained_W,train_predictions,acc
+    return trained_W
 
 # FUNCTIONS -----
 
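With 2.1.2, fit() in plan_bi no longer tracks a running train accuracy and returns only the trained weight list instead of the old (trained_W, train_predictions, acc) tuple. A minimal call-site sketch, assuming the fit(x_train, y_train, activation_potential) parameter order visible in these hunks; the import path and dataset variables are hypothetical:

    import numpy as np
    from plan_bi import fit  # assumed top-level import, per the __init__ hunk above

    # Tiny synthetic one-hot dataset, just to exercise the call.
    x_train = [np.random.rand(4) for _ in range(6)]
    y_train = np.eye(2)[[0, 1, 0, 1, 0, 1]]

    # 2.1.1: trained_W, train_predictions, acc = fit(x_train, y_train, 0.5)
    trained_W = fit(x_train, y_train, 0.5)  # 2.1.2: weights only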
@@ -198,7 +181,7 @@ def synaptic_pruning(
    Class, # int: Class label for the current training instance.
    class_count, # int: Total number of classes in the dataset.
    piece, # int: Which set of neurons will information be transferred to?
-    is_training # int: 1 or 0
+    is_training # bool: Flag indicating if the function is called during training (True or False).
 
 ) -> str:
    infoPruning = """
@@ -210,6 +193,8 @@ def synaptic_pruning(
    key (str): key for identifying synaptic row or col connections.
    Class (int): Class label for the current training instance.
    class_count (int): Total number of classes in the dataset.
+    piece (int): Which set of neurons will information be transferred to?
+    is_training (bool): Flag indicating if the function is called during training (True or False).
 
    Returns:
    numpy array: Weight matrix.
@@ -222,7 +207,7 @@ def synaptic_pruning(
 
    ce = cs / Class # ce(cut_end) = cs(cut_start) / current_class
 
-    if is_training == 1:
+    if is_training == True:
 
        p = piece
 
@@ -293,10 +278,11 @@ def synaptic_dividing(
 
 def fex(
    Input, # list[num]: Input data.
-    w, # list[num]: Weight matrix of the neural network.,
+    w, # list[num]: Weight matrix of the neural network.
    activation_potential, # float: Threshold value for comparison.
    piece, # int: Which set of neurons will information be transferred to?
-    is_training # int: 1 or 0
+    is_training, # bool: Flag indicating if the function is called during training (True or False).
+    Class # if is during training then which class(label) ? is isnt then put None.
 ) -> tuple:
    """
    Applies feature extraction process to the input data using synaptic pruning.
@@ -306,24 +292,36 @@ def fex(
    w (list[num]): Weight matrix of the neural network.
    activation_potential (float): Threshold value for comparison.
    piece (int): Which set of neurons will information be transferred to?
+    is_training (bool): Flag indicating if the function is called during training (True or False).
+    Class (int): if is during training then which class(label) ? is isnt then put None.
    Returns:
    tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
    """
 
-
-    PruneIndex = np.where(Input < activation_potential)
-    w = synaptic_pruning(w, PruneIndex, 'col', 0, 0, piece, is_training)
-
-    neural_layer = np.dot(w, Input)
+    if is_training == True:
+
+        PruneIndex = np.where(Input < activation_potential)
+        w = synaptic_pruning(w, PruneIndex, 'col', 0, 0, piece, is_training)
+
+        input_features_index = np.where(w[Class,:] == 1)
+        w[Class,input_features_index] = Input[input_features_index]
+
+        return w
 
-    return neural_layer,w
+    else:
+
+        PruneIndex = np.where(Input < activation_potential)
+        w = synaptic_pruning(w, PruneIndex, 'col', 0, 0, piece, is_training)
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer
 
 def cat(
    Input, # list[num]: Input data.
    w, # list[num]: Weight matrix of the neural network.
-    activation_potential, # (float): Threshold value for comparison.
-    is_training, # (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).
-    piece # (int) Which set of neurons will information be transferred to?
+    is_training, # (bool): Flag indicating if the function is called during training (True or False).
+    Class # (int): if is during training then which class(label) ? is isnt then put None.
 ) -> tuple:
    """
    Applies categorization process to the input data using synaptic pruning if specified.
@@ -331,23 +329,25 @@ def cat(
    Args:
    Input (list[num]): Input data.
    w (list[num]): Weight matrix of the neural network.
-    activation_potential (float): Threshold value for comparison.
-    is_training (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).
-    piece (int): Which set of neurons will information be transferred to?
+    is_training (bool): Flag indicating if the function is called during training (True or False).
+    Class (int): if is during training then which class(label) ? is isnt then put None.
    Returns:
    tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
    """
 
-    PruneIndex = np.where(Input == 0)
 
-    if is_training == 1:
-
-        w = synaptic_pruning(w, PruneIndex, 'col', 0, 0, piece, is_training)
-
 
-    neural_layer = np.dot(w, Input)
+    if is_training == True:
+
+        w[Class,Class] += 1
+
+        return w
+
+    else:
+
+        neural_layer = np.dot(w, Input)
 
-    return neural_layer,w
+        return neural_layer
 
 
 def normalization(
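The reworked fex() and cat() now branch on is_training: in training mode they imprint the current sample into its class row (or, for cat, bump the class diagonal) and return only the weight matrix, while in inference mode they return the layer activations. A simplified paraphrase of the new fex() contract, not the library code itself; the column-zeroing below is a stand-in for synaptic_pruning:

    import numpy as np

    def fex_sketch(Input, w, activation_potential, is_training, Class):
        # Simplified paraphrase of the 2.1.2 fex() contract (plan_bi variant).
        w = w.copy()
        w[:, Input < activation_potential] = 0   # stand-in for synaptic_pruning 'col'
        if is_training:
            keep = w[Class, :] == 1              # synapses that survived pruning
            w[Class, keep] = Input[keep]         # imprint the sample's features
            return w                             # training mode returns only w
        return np.dot(w, Input)                  # inference mode returns activations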
@@ -468,9 +468,9 @@ def evaluate(
            neural_layer = normalization(neural_layer)
 
            if layers[index] == 'fex':
-                neural_layer = fex(neural_layer, W[index], activation_potential, 0, 0)[0]
+                neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)
            if layers[index] == 'cat':
-                neural_layer = cat(neural_layer, W[index], activation_potential, 0, 0)[0]
+                neural_layer = cat(neural_layer, W[index], False, None)
 
        for i, w in enumerate(Wc):
            W[i] = np.copy(w)
@@ -552,6 +552,146 @@ def evaluate(
 
    return W,TestPredictions,acc
 
+
+def multiple_evaluate(
+    x_test, # list[num]: Test input data.
+    y_test, # list[num]: Test labels.
+    activation_potentials, # float: Threshold value for comparison.
+    visualize, # str: visualize Testing procces or not visualize ('y' or 'n')
+    MW # list[list[num]]: Weight matrix of the neural network.
+) -> tuple:
+    infoTestModel = """
+    Tests the neural network model with the given test data.
+
+    Args:
+    x_test (list[num]): Test input data.
+    y_test (list[num]): Test labels.
+    activation_potential (float): Input activation potential
+    visualize (str): Visualize test progress ? ('y' or 'n')
+    MW (list(list[num])): Multiple Weight matrix list of the neural network. (Multiple model testing)
+
+    Returns:
+    tuple: A tuple containing the predicted labels and the accuracy of the model.
+    """
+
+    layers = ['fex','cat']
+
+    try:
+        print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
+        start_time = time.time()
+        true = 0
+        for inpIndex,Input in enumerate(x_test):
+
+            output_layer = 0
+
+            for m, Model in enumerate(MW):
+
+                W = Model
+
+                Wc = [0] * len(W) # Wc = weight copy
+
+                TestPredictions = [None] * len(y_test)
+                for i, w in enumerate(W):
+                    Wc[i] = np.copy(w)
+
+                Input = np.array(Input)
+                Input = Input.ravel()
+                uni_start_time = time.time()
+                neural_layer = Input
+
+                for index, Layer in enumerate(layers):
+
+                    neural_layer = normalization(neural_layer)
+
+                    if layers[index] == 'fex':
+                        neural_layer = fex(neural_layer, W[index], activation_potentials[m], None, False, None)
+                    if layers[index] == 'cat':
+                        neural_layer = cat(neural_layer, W[index], False, None)
+
+                output_layer += neural_layer
+
+                for i, w in enumerate(Wc):
+                    W[i] = np.copy(w)
+            for i, w in enumerate(Wc):
+                W[i] = np.copy(w)
+            RealOutput = np.argmax(y_test[inpIndex])
+            PredictedOutput = np.argmax(output_layer)
+            if RealOutput == PredictedOutput:
+                true += 1
+            acc = true / len(y_test)
+            TestPredictions[inpIndex] = PredictedOutput
+
+            if visualize == 'y':
+
+                y_testVisual = np.copy(y_test)
+                y_testVisual = np.argmax(y_testVisual, axis=1)
+
+                plt.figure(figsize=(12, 6))
+                sns.kdeplot(y_testVisual, label='Real Outputs', fill=True)
+                sns.kdeplot(TestPredictions, label='Predictions', fill=True)
+                plt.legend()
+                plt.xlabel('Class')
+                plt.ylabel('Data size')
+                plt.title('Predictions and Real Outputs for Testing KDE Plot')
+                plt.show()
+
+                if inpIndex + 1 != len(x_test):
+
+                    plt.close('all')
+
+            uni_end_time = time.time()
+
+            calculating_est = round((uni_end_time - uni_start_time) * (len(x_test) - inpIndex),3)
+
+            if calculating_est < 60:
+                print('\rest......(sec):',calculating_est,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+            elif calculating_est > 60 and calculating_est < 3600:
+                print('\rest......(min):',calculating_est/60,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+            elif calculating_est > 3600:
+                print('\rest......(h):',calculating_est/3600,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+        EndTime = time.time()
+        for i, w in enumerate(Wc):
+            W[i] = np.copy(w)
+
+        calculating_est = round(EndTime - start_time,2)
+
+        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
+
+        if calculating_est < 60:
+            print('Total testing time(sec): ',calculating_est)
+
+        elif calculating_est > 60 and calculating_est < 3600:
+            print('Total testing time(min): ',calculating_est/60)
+
+        elif calculating_est > 3600:
+            print('Total testing time(h): ',calculating_est/3600)
+
+        if acc >= 0.8:
+            print(Fore.GREEN + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+        elif acc < 0.8 and acc > 0.6:
+            print(Fore.MAGENTA + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+        elif acc <= 0.6:
+            print(Fore.RED+ '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+
+    except:
+
+        print(Fore.RED + "ERROR: Testing model parameters like 'activation_potential' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
+        return 'e'
+
+
+    return W,TestPredictions,acc
+
 def save_model(model_name,
    model_type,
    class_count,
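multiple_evaluate is the headline addition: each test input is run through every weight set in MW and the per-model output layers are summed before the argmax, a simple additive ensemble. A hedged usage sketch with hypothetical data and an assumed import path, following the signature added above:

    from plan_bi import fit, multiple_evaluate  # assumed top-level imports

    # Two models trained with different activation potentials (hypothetical arrays).
    W1 = fit(x_train, y_train, 0.4)
    W2 = fit(x_train, y_train, 0.6)

    # Signature per the added code: (x_test, y_test, activation_potentials, visualize, MW).
    W, predictions, acc = multiple_evaluate(x_test, y_test, [0.4, 0.6], 'n', [W1, W2])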
@@ -791,9 +931,9 @@ def predict_model_ssd(Input,model_name,model_path):
            neural_layer = normalization(neural_layer)
 
            if layers[index] == 'fex':
-                neural_layer = fex(neural_layer, W[index], activation_potential,0, 0)[0]
+                neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)
            if layers[index] == 'cat':
-                neural_layer = cat(neural_layer, W[index], activation_potential, 0, 0)[0]
+                neural_layer = cat(neural_layer, W[index], False, None)
    except:
        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" + infopredict_model_ssd + Style.RESET_ALL)
        return 'e'
@@ -831,9 +971,9 @@ def predict_model_ram(Input,activation_potential,W):
            neural_layer = normalization(neural_layer)
 
            if layers[index] == 'fex':
-                neural_layer = fex(neural_layer, W[index], activation_potential,0, 0)[0]
+                neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)
            if layers[index] == 'cat':
-                neural_layer = cat(neural_layer, W[index], activation_potential, 0, 0)[0]
+                neural_layer = cat(neural_layer, W[index], False, None)
 
    except:
        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + infopredict_model_ram + Style.RESET_ALL)
@@ -973,4 +1113,5 @@ def get_acc():
 
 def get_pot():
 
-    return 1
+    return 1
+
@@ -2,4 +2,4 @@
 
 # This file is the main entry point of the plan module.
 
-from .plan_di import auto_balancer, normalization, Softmax, Sigmoid, Relu, synaptic_pruning, synaptic_dividing, weight_identification, fex, cat, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler
+from .plan_di import auto_balancer, normalization, Softmax, Sigmoid, Relu, synaptic_pruning, synaptic_dividing, weight_identification, fex, cat, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler, multiple_evaluate
@@ -55,9 +55,7 @@ def fit(
    W = weight_identification(len(layers) - 1,len(class_count),neurons,x_train_size)
    Divides, Piece = synaptic_dividing(len(class_count),W)
    trained_W = [1] * len(W)
-    print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL,)
-    train_predictions = [None] * len(y_train)
-    true = 0
+    print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
    start_time = time.time()
    for index, inp in enumerate(x_train):
        uni_start_time = time.time()
@@ -80,7 +78,7 @@ def fit(
                cs = Divides[int(k)][Windex][0]
 
 
-                W[Windex] = synaptic_pruning(w, cs, 'row', int(k),len(class_count),Piece[Windex],1)
+                W[Windex] = synaptic_pruning(w, cs, 'row', int(k), len(class_count), Piece[Windex], True)
 
        neural_layer = inp
 
@@ -89,24 +87,15 @@ def fit(
 
            neural_layer = normalization(neural_layer)
 
-
+            y = np.argmax(y_train[index])
            if Layer == 'fex':
-                y = np.argmax(y_train[index])
-
-                neural_layer,W[Lindex] = fex(neural_layer, W[Lindex], y , 1)
+                W[Lindex] = fex(neural_layer, W[Lindex], True, y)
            elif Layer == 'cat':
-                neural_layer,W[Lindex] = cat(neural_layer, W[Lindex], 1, Piece[Windex])
+                W[Lindex] = cat(neural_layer, W[Lindex], True, y)
 
-        RealOutput = np.argmax(y_train[index])
-        PredictedOutput = np.argmax(neural_layer)
-        if RealOutput == PredictedOutput:
-            true += 1
-        acc = true / len(y_train)
-        train_predictions[index] = PredictedOutput
 
        for i, w in enumerate(W):
            trained_W[i] = trained_W[i] + w
-
 
    W = weight_identification(len(layers) - 1, len(class_count), neurons, x_train_size)
 
@@ -117,44 +106,32 @@ def fit(
 
        if calculating_est < 60:
            print('\rest......(sec):',calculating_est,'\n',end= "")
-            print('\rTrain accuracy: ' ,acc ,"\n", end="")
-
+
        elif calculating_est > 60 and calculating_est < 3600:
            print('\rest......(min):',calculating_est/60,'\n',end= "")
-            print('\rTrain accuracy: ' ,acc ,"\n", end="")
-
+
        elif calculating_est > 3600:
            print('\rest......(h):',calculating_est/3600,'\n',end= "")
-            print('\rTrain accuracy: ' ,acc ,"\n", end="")
+
+        print('\rTraining: ' , index, "/", len(x_train),"\n", end="")
 
    EndTime = time.time()
-
+
    calculating_est = round(EndTime - start_time,2)
-
-    print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n")
-
+
+    print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n" + Style.RESET_ALL)
+
    if calculating_est < 60:
-        print('Total training time(sec): ',calculating_est)
-
-    elif calculating_est > 60 and calculating_est < 3600:
-        print('Total training time(min): ',calculating_est/60)
-
-    elif calculating_est > 3600:
-        print('Total training time(h): ',calculating_est/3600)
-
-    if acc > 0.8:
-        print(Fore.GREEN + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
-
-    elif acc < 0.8 and acc > 0.6:
-        print(Fore.MAGENTA + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
-
-    elif acc < 0.6:
-        print(Fore.RED+ '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
+        print('Total training time(sec): ',calculating_est)
 
+    elif calculating_est > 60 and calculating_est < 3600:
+        print('Total training time(min): ',calculating_est/60)
 
+    elif calculating_est > 3600:
+        print('Total training time(h): ',calculating_est/3600)
 
 
-    return trained_W,train_predictions,acc
+    return trained_W
 
 # FUNCTIONS -----
 
@@ -194,7 +171,7 @@ def synaptic_pruning(
    Class, # int: Class label for the current training instance.
    class_count, # int: Total number of classes in the dataset.
    piece, # int: Which set of neurons will information be transferred to?
-    is_training # int: 1 or 0
+    is_training # bool: Flag indicating if the function is called during training (True or False).
 
 ) -> str:
    infoPruning = """
@@ -207,7 +184,7 @@ def synaptic_pruning(
    Class (int): Class label for the current training instance.
    class_count (int): Total number of classes in the dataset.
    piece (int): Which set of neurons will information be transferred to?
-    is_training (int): 1 or 0
+    is_training (bool): Flag indicating if the function is called during training (True or False).
 
    Returns:
    numpy array: Weight matrix.
@@ -222,7 +199,7 @@ def synaptic_pruning(
 
    ce = cs / Class # ce(cut_end) = cs(cut_start) / current_class
 
-    if is_training == 1:
+    if is_training == True:
 
        p = piece
 
@@ -295,9 +272,9 @@ def synaptic_dividing(
 
 def fex(
    Input, # list[num]: Input data.
-    w, # num: Weight matrix of the neural network.,
-    Class, # int: Which class is, if training.
-    is_training # int: 1 or 0
+    w, # num: Weight matrix of the neural network.
+    is_training, # bool: Flag indicating if the function is called during training (True or False).
+    Class # int: Which class is, if training.
 ) -> tuple:
    """
    Applies feature extraction process to the input data using synaptic pruning.
@@ -305,50 +282,56 @@ def fex(
    Args:
    Input (num): Input data.
    w (num): Weight matrix of the neural network.
-    Class (int): Which class is, if training.
-    is_training (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).
+    is_training (bool): Flag indicating if the function is called during training (True or False).
+    Class (int): if is during training then which class(label) ? is isnt then put None.
 
    Returns:
    tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
    """
 
-    if is_training == 1:
+    if is_training == True:
 
        w[Class,:] = Input
 
-        neural_layer = np.dot(w, Input)
+        return w
 
-    return neural_layer,w
+    else:
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer
 
 def cat(
    Input, # list[num]: Input data.
    w, # list[num]: Weight matrix of the neural network.
-    is_training, # int: Flag indicating if the function is called during training (1 for training, 0 otherwise).
-    piece # int Which set of neurons will information be transferred to?
+    is_training, # (bool): Flag indicating if the function is called during training (True or False).
+    Class
 ) -> tuple:
    """
    Applies categorization process to the input data using synaptic pruning if specified.
 
    Args:
    Input (list[num]): Input data.
-    w (num): Weight matrix of the neural network.
-    is_training (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).
-    piece (int): Which set of neurons will information be transferred to?
-    ) -> tuple:
+    w (list[num]): Weight matrix of the neural network.
+    is_training (bool): Flag indicating if the function is called during training (True or False).
+    Class (int): if is during training then which class(label) ? is isnt then put None.
    Returns:
    tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
    """
 
-    PruneIndex = np.where(Input == 0)
 
-    if is_training == 1:
-
-        w = synaptic_pruning(w, PruneIndex, 'col', 0, 0, piece, is_training)
-
 
-    neural_layer = np.dot(w, Input)
+    if is_training == True:
+
+        w[Class,Class] += 1
+
+        return w
+
+    else:
+
+        neural_layer = np.dot(w, Input)
 
-    return neural_layer,w
+        return neural_layer
 
 
 def normalization(
@@ -467,10 +450,10 @@ def evaluate(
 
            neural_layer = normalization(neural_layer)
 
-            if layers[index] == 'fex':
-                neural_layer = fex(neural_layer, W[index], 0, 0)[0]
-            if layers[index] == 'cat':
-                neural_layer = cat(neural_layer, W[index], 0, 0)[0]
+            if Layer == 'fex':
+                neural_layer = fex(neural_layer, W[index], False, None)
+            elif Layer == 'cat':
+                neural_layer = cat(neural_layer, W[index], False, None)
 
        for i, w in enumerate(Wc):
            W[i] = np.copy(w)
@@ -552,6 +535,144 @@ def evaluate(
 
    return W,TestPredictions,acc
 
+def multiple_evaluate(
+    x_test, # list[num]: Test input data.
+    y_test, # list[num]: Test labels.
+    visualize, # str: visualize Testing procces or not visualize ('y' or 'n')
+    MW # list[list[num]]: Weight matrix of the neural network.
+) -> tuple:
+    infoTestModel = """
+    Tests the neural network model with the given test data.
+
+    Args:
+    x_test (list[num]): Test input data.
+    y_test (list[num]): Test labels.
+    activation_potential (float): Input activation potential
+    visualize (str): Visualize test progress ? ('y' or 'n')
+    MW (list(list[num])): Multiple Weight matrix list of the neural network. (Multiple model testing)
+
+    Returns:
+    tuple: A tuple containing the predicted labels and the accuracy of the model.
+    """
+
+    layers = ['fex','cat']
+
+    try:
+        print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
+        start_time = time.time()
+        true = 0
+        for inpIndex,Input in enumerate(x_test):
+
+            output_layer = 0
+
+            for m, Model in enumerate(MW):
+
+                W = Model
+
+                Wc = [0] * len(W) # Wc = weight copy
+
+                TestPredictions = [None] * len(y_test)
+                for i, w in enumerate(W):
+                    Wc[i] = np.copy(w)
+
+                Input = np.array(Input)
+                Input = Input.ravel()
+                uni_start_time = time.time()
+                neural_layer = Input
+
+                for index, Layer in enumerate(layers):
+
+                    neural_layer = normalization(neural_layer)
+
+                    if Layer == 'fex':
+                        neural_layer = fex(neural_layer, W[index], False, None)
+                    elif Layer == 'cat':
+                        neural_layer = cat(neural_layer, W[index], False, None)
+
+                output_layer += neural_layer
+
+                for i, w in enumerate(Wc):
+                    W[i] = np.copy(w)
+            for i, w in enumerate(Wc):
+                W[i] = np.copy(w)
+            RealOutput = np.argmax(y_test[inpIndex])
+            PredictedOutput = np.argmax(output_layer)
+            if RealOutput == PredictedOutput:
+                true += 1
+            acc = true / len(y_test)
+            TestPredictions[inpIndex] = PredictedOutput
+
+            if visualize == 'y':
+
+                y_testVisual = np.copy(y_test)
+                y_testVisual = np.argmax(y_testVisual, axis=1)
+
+                plt.figure(figsize=(12, 6))
+                sns.kdeplot(y_testVisual, label='Real Outputs', fill=True)
+                sns.kdeplot(TestPredictions, label='Predictions', fill=True)
+                plt.legend()
+                plt.xlabel('Class')
+                plt.ylabel('Data size')
+                plt.title('Predictions and Real Outputs for Testing KDE Plot')
+                plt.show()
+
+                if inpIndex + 1 != len(x_test):
+
+                    plt.close('all')
+
+            uni_end_time = time.time()
+
+            calculating_est = round((uni_end_time - uni_start_time) * (len(x_test) - inpIndex),3)
+
+            if calculating_est < 60:
+                print('\rest......(sec):',calculating_est,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+            elif calculating_est > 60 and calculating_est < 3600:
+                print('\rest......(min):',calculating_est/60,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+            elif calculating_est > 3600:
+                print('\rest......(h):',calculating_est/3600,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+        EndTime = time.time()
+        for i, w in enumerate(Wc):
+            W[i] = np.copy(w)
+
+        calculating_est = round(EndTime - start_time,2)
+
+        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
+
+        if calculating_est < 60:
+            print('Total testing time(sec): ',calculating_est)
+
+        elif calculating_est > 60 and calculating_est < 3600:
+            print('Total testing time(min): ',calculating_est/60)
+
+        elif calculating_est > 3600:
+            print('Total testing time(h): ',calculating_est/3600)
+
+        if acc >= 0.8:
+            print(Fore.GREEN + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+        elif acc < 0.8 and acc > 0.6:
+            print(Fore.MAGENTA + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+        elif acc <= 0.6:
+            print(Fore.RED+ '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+
+    except:
+
+        print(Fore.RED + "ERROR: Testing model parameters like 'activation_potential' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
+        return 'e'
+
+
+    return W,TestPredictions,acc
+
 def save_model(model_name,
    model_type,
    class_count,
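plan_di gains the same ensemble evaluator, minus the activation potentials, since its fex() takes none. A hedged sketch of the call shape, again with hypothetical data and an assumed import path:

    from plan_di import fit, multiple_evaluate  # assumed top-level imports

    W1 = fit(x_train, y_train)  # plan_di's fit takes no activation potential
    W2 = fit(x_train, y_train)

    # Signature per the added code: (x_test, y_test, visualize, MW).
    W, predictions, acc = multiple_evaluate(x_test, y_test, 'n', [W1, W2])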
@@ -787,10 +908,10 @@ def predict_model_ssd(Input,model_name,model_path):
 
            neural_layer = normalization(neural_layer)
 
-            if layers[index] == 'fex':
-                neural_layer = fex(neural_layer, W[index],0, 0)[0]
-            if layers[index] == 'cat':
-                neural_layer = cat(neural_layer, W[index], 0, 0)[0]
+            if Layer == 'fex':
+                neural_layer = fex(neural_layer, W[index], False, None)
+            elif Layer == 'cat':
+                neural_layer = cat(neural_layer, W[index], False, None)
    except:
        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" + infopredict_model_ssd + Style.RESET_ALL)
        return 'e'
@@ -827,10 +948,10 @@ def predict_model_ram(Input,W):
 
            neural_layer = normalization(neural_layer)
 
-            if layers[index] == 'fex':
-                neural_layer = fex(neural_layer, W[index],0, 0)[0]
-            if layers[index] == 'cat':
-                neural_layer = cat(neural_layer, W[index], 0, 0)[0]
+            if Layer == 'fex':
+                neural_layer = fex(neural_layer, W[index], False, None)
+            elif Layer == 'cat':
+                neural_layer = cat(neural_layer, W[index], False, None)
 
    except:
        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + infopredict_model_ram + Style.RESET_ALL)
@@ -966,8 +1087,4 @@ def get_preds():
 
 def get_acc():
 
-    return 2
-
-def get_pot():
-
-    return 1
+    return 2
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.1.1
-Summary: Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
+Version: 2.1.2
+Summary: new 'multiple_evaluate' function added. And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classifcation,pruning learning artficial neural networks
@@ -5,10 +5,10 @@ from setuptools import setup, find_packages
 setup(
 
    name = "pyerualjetwork",
-    version = "2.1.1",
+    version = "2.1.2",
    author = "Hasan Can Beydili",
    author_email = "tchasancan@gmail.com",
-    description= " Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)",
+    description= " new 'multiple_evaluate' function added. And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)",
    packages = find_packages(),
    keywords = ["model evaluation", "classifcation", 'pruning learning artficial neural networks'],
 
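After the version bump, upgrading the installed package is all that is needed to pick up the new export; a hedged sketch, assuming the sub-packages install top-level as the __init__ hunks suggest:

    # pip install --upgrade pyerualjetwork   (to 2.1.2)
    from plan_bi import multiple_evaluate as multiple_evaluate_bi
    from plan_di import multiple_evaluate as multiple_evaluate_di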
File without changes