pyerualjetwork 2.1.1__py3-none-any.whl → 2.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plan_bi/__init__.py +1 -1
- plan_bi/plan_bi.py +572 -144
- plan_di/__init__.py +1 -1
- plan_di/plan_di.py +872 -469
- pyerualjetwork-2.1.3.dist-info/METADATA +8 -0
- pyerualjetwork-2.1.3.dist-info/RECORD +8 -0
- pyerualjetwork-2.1.1.dist-info/METADATA +0 -8
- pyerualjetwork-2.1.1.dist-info/RECORD +0 -8
- {pyerualjetwork-2.1.1.dist-info → pyerualjetwork-2.1.3.dist-info}/WHEEL +0 -0
- {pyerualjetwork-2.1.1.dist-info → pyerualjetwork-2.1.3.dist-info}/top_level.txt +0 -0
plan_bi/plan_bi.py
CHANGED
@@ -62,8 +62,6 @@ def fit(
     Divides, Piece = synaptic_dividing(len(class_count),W)
     trained_W = [1] * len(W)
     print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL,)
-    train_predictions = [None] * len(y_train)
-    true = 0
     start_time = time.time()
     for index, inp in enumerate(x_train):
         uni_start_time = time.time()
@@ -86,27 +84,20 @@ def fit(
                 cs = Divides[int(k)][Windex][0]


-                W[Windex] = synaptic_pruning(w, cs, 'row', int(k),len(class_count),Piece[Windex],
+                W[Windex] = synaptic_pruning(w, cs, 'row', int(k), len(class_count), Piece[Windex], True)

         neural_layer = inp

         for Lindex, Layer in enumerate(layers):

-
+            y = np.argmax(y_train[index])
             neural_layer = normalization(neural_layer)
-

             if Layer == 'fex':
-
+                W[Lindex] = fex(neural_layer, W[Lindex], activation_potential, Piece[Windex], True, y)
             elif Layer == 'cat':
-
+                W[Lindex] = cat(neural_layer, W[Lindex], True, y)

-        RealOutput = np.argmax(y_train[index])
-        PredictedOutput = np.argmax(neural_layer)
-        if RealOutput == PredictedOutput:
-            true += 1
-        acc = true / len(y_train)
-        train_predictions[index] = PredictedOutput

         for i, w in enumerate(W):
             trained_W[i] = trained_W[i] + w
@@ -121,15 +112,16 @@ def fit(

         if calculating_est < 60:
             print('\rest......(sec):',calculating_est,'\n',end= "")
-
-
+
         elif calculating_est > 60 and calculating_est < 3600:
             print('\rest......(min):',calculating_est/60,'\n',end= "")
-
-
+
         elif calculating_est > 3600:
             print('\rest......(h):',calculating_est/3600,'\n',end= "")
-
+
+        print('\rTraining: ' , index, "/", len(x_train),"\n", end="")
+
+

     EndTime = time.time()

@@ -145,20 +137,11 @@ def fit(

     elif calculating_est > 3600:
         print('Total training time(h): ',calculating_est/3600)
-
-    if acc > 0.8:
-        print(Fore.GREEN + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
-
-    elif acc < 0.8 and acc > 0.6:
-        print(Fore.MAGENTA + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
-
-    elif acc < 0.6:
-        print(Fore.RED+ '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)




-    return trained_W
+    return trained_W

 # FUNCTIONS -----

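Net effect of the fit() hunks above: the in-loop accuracy bookkeeping (train_predictions, true, acc) and the colored end-of-training accuracy banner are gone, and the supervised target y = np.argmax(y_train[index]) is now computed per sample and passed into fex()/cat(). A caller who still wants the old training-accuracy readout can compute it with evaluate() after training. A minimal sketch, assuming fit(x_train, y_train, activation_potential) keeps its earlier 2.x positional signature (the full parameter list is not visible in this diff):

    # hypothetical usage; evaluate's new signature appears later in this diff
    trained_W = fit(x_train, y_train, activation_potential)
    W, y_preds, train_acc = evaluate(x_train, y_train, activation_potential, False, trained_W)
    print('Train accuracy:', train_acc)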
@@ -198,7 +181,7 @@ def synaptic_pruning(
     Class, # int: Class label for the current training instance.
     class_count, # int: Total number of classes in the dataset.
     piece, # int: Which set of neurons will information be transferred to?
-    is_training #
+    is_training # bool: Flag indicating if the function is called during training (True or False).

 ) -> str:
     infoPruning = """
@@ -210,6 +193,8 @@ def synaptic_pruning(
     key (str): key for identifying synaptic row or col connections.
     Class (int): Class label for the current training instance.
     class_count (int): Total number of classes in the dataset.
+    piece (int): Which set of neurons will information be transferred to?
+    is_training (bool): Flag indicating if the function is called during training (True or False).

     Returns:
     numpy array: Weight matrix.
@@ -222,7 +207,7 @@ def synaptic_pruning(

     ce = cs / Class # ce(cut_end) = cs(cut_start) / current_class

-    if is_training ==
+    if is_training == True:

         p = piece

@@ -293,10 +278,11 @@ def synaptic_dividing(

 def fex(
     Input, # list[num]: Input data.
-    w, # list[num]: Weight matrix of the neural network
+    w, # list[num]: Weight matrix of the neural network.
     activation_potential, # float: Threshold value for comparison.
     piece, # int: Which set of neurons will information be transferred to?
-    is_training #
+    is_training, # bool: Flag indicating if the function is called during training (True or False).
+    Class # if is during training then which class(label) ? is isnt then put None.
 ) -> tuple:
     """
     Applies feature extraction process to the input data using synaptic pruning.
@@ -306,24 +292,36 @@ def fex(
     w (list[num]): Weight matrix of the neural network.
     activation_potential (float): Threshold value for comparison.
     piece (int): Which set of neurons will information be transferred to?
+    is_training (bool): Flag indicating if the function is called during training (True or False).
+    Class (int): if is during training then which class(label) ? is isnt then put None.
     Returns:
     tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
     """

-
-
-
-
-
+    if is_training == True:
+
+        PruneIndex = np.where(Input < activation_potential)
+        w = synaptic_pruning(w, PruneIndex, 'col', 0, 0, piece, is_training)
+
+        input_features_index = np.where(w[Class,:] == 1)
+        w[Class,input_features_index] = Input[input_features_index]
+
+        return w

-
+    else:
+
+        PruneIndex = np.where(Input < activation_potential)
+        w = synaptic_pruning(w, PruneIndex, 'col', 0, 0, piece, is_training)
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer

 def cat(
     Input, # list[num]: Input data.
     w, # list[num]: Weight matrix of the neural network.
-
-
-    piece # (int) Which set of neurons will information be transferred to?
+    is_training, # (bool): Flag indicating if the function is called during training (True or False).
+    Class # (int): if is during training then which class(label) ? is isnt then put None.
 ) -> tuple:
     """
     Applies categorization process to the input data using synaptic pruning if specified.
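The rewritten fex() is now explicitly dual-mode: with is_training=True it prunes input features below activation_potential, writes the surviving feature values into row Class of w, and returns the updated weight matrix; with is_training=False it prunes the same way and returns the propagated layer np.dot(w, Input). The two call shapes, exactly as they appear elsewhere in this diff:

    # training (inside fit): updates and returns the weight matrix
    W[Lindex] = fex(neural_layer, W[Lindex], activation_potential, Piece[Windex], True, y)

    # inference (evaluate and the predict_model_* helpers): returns the next layer
    neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)

Note that despite the unchanged ") -> tuple:" annotation, each branch now returns a single object, not a tuple.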
@@ -331,23 +329,25 @@ def cat(
     Args:
     Input (list[num]): Input data.
     w (list[num]): Weight matrix of the neural network.
-
-
-    piece (int): Which set of neurons will information be transferred to?
+    is_training (bool): Flag indicating if the function is called during training (True or False).
+    Class (int): if is during training then which class(label) ? is isnt then put None.
     Returns:
     tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
     """

-    PruneIndex = np.where(Input == 0)

-    if is_training == 1:
-
-        w = synaptic_pruning(w, PruneIndex, 'col', 0, 0, piece, is_training)
-

-
+    if is_training == True:
+
+        w[Class,Class] += 1
+
+        return w
+
+    else:
+
+        neural_layer = np.dot(w, Input)

-
+        return neural_layer


 def normalization(
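cat() gets the same training/inference split and is simplified further: the piece parameter and the synaptic_pruning call on zero-valued inputs are dropped entirely. Training mode reduces to reinforcing the class diagonal (w[Class,Class] += 1) and returning w; inference mode returns np.dot(w, Input). The matching call shapes from this diff:

    W[Lindex] = cat(neural_layer, W[Lindex], True, y)         # training, inside fit
    neural_layer = cat(neural_layer, W[index], False, None)   # inference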
@@ -426,8 +426,8 @@ def Relu(
 def evaluate(
     x_test, # list[num]: Test input data.
     y_test, # list[num]: Test labels.
-    activation_potential, # float:
-
+    activation_potential, # float: Input activation potential
+    show_metrices, # (bool): (True or False)
     W # list[num]: Weight matrix of the neural network.
 ) -> tuple:
     infoTestModel = """
@@ -437,7 +437,7 @@ def evaluate(
     x_test (list[num]): Test input data.
     y_test (list[num]): Test labels.
     activation_potential (float): Input activation potential
-
+    show_metrices (bool): (True or False)
     W (list[num]): Weight matrix list of the neural network.

     Returns:
@@ -450,7 +450,8 @@ def evaluate(
     try:
         Wc = [0] * len(W) # Wc = weight copy
         true = 0
-
+        y_preds = [-1] * len(y_test)
+        acc_list = []
         for i, w in enumerate(W):
             Wc[i] = np.copy(w)
             print('\rCopying weights.....',i+1,'/',len(W),end = "")
@@ -468,9 +469,9 @@ def evaluate(
                 neural_layer = normalization(neural_layer)

                 if layers[index] == 'fex':
-                    neural_layer = fex(neural_layer, W[index], activation_potential,
+                    neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)
                 if layers[index] == 'cat':
-                    neural_layer = cat(neural_layer, W[index],
+                    neural_layer = cat(neural_layer, W[index], False, None)

             for i, w in enumerate(Wc):
                 W[i] = np.copy(w)
@@ -479,25 +480,9 @@ def evaluate(
             if RealOutput == PredictedOutput:
                 true += 1
             acc = true / len(y_test)
-
-
-
-
-            y_testVisual = np.copy(y_test)
-            y_testVisual = np.argmax(y_testVisual, axis=1)
-
-            plt.figure(figsize=(12, 6))
-            sns.kdeplot(y_testVisual, label='Real Outputs', fill=True)
-            sns.kdeplot(TestPredictions, label='Predictions', fill=True)
-            plt.legend()
-            plt.xlabel('Class')
-            plt.ylabel('Data size')
-            plt.title('Predictions and Real Outputs for Testing KDE Plot')
-            plt.show()
-
-            if inpIndex + 1 != len(x_test):
-
-                plt.close('all')
+            if show_metrices == True:
+                acc_list.append(acc)
+            y_preds[inpIndex] = PredictedOutput

             uni_end_time = time.time()

@@ -514,7 +499,9 @@ def evaluate(
             elif calculating_est > 3600:
                 print('\rest......(h):',calculating_est/3600,'\n',end= "")
             print('\rTest accuracy: ' ,acc ,"\n", end="")
-
+        if show_metrices == True:
+            plot_evaluate(y_test, y_preds, acc_list)
+
         EndTime = time.time()
         for i, w in enumerate(Wc):
             W[i] = np.copy(w)
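Within evaluate(), the per-sample seaborn KDE plotting is replaced by lightweight collection into y_preds and acc_list, and plotting now happens once, behind the new show_metrices flag, via the plot_evaluate() helper added near the end of this file. As the next hunk shows, evaluate() now returns the triple W, y_preds, acc. A call under the new signature would look like this sketch, with illustrative values:

    # show_metrices=True pops the confusion-matrix/ROC/metrics figure
    W, y_preds, acc = evaluate(x_test, y_test, activation_potential, True, trained_W)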
@@ -550,7 +537,138 @@ def evaluate(



-    return W,
+    return W,y_preds,acc
+
+
+def multiple_evaluate(
+    x_test, # list[num]: Test input data.
+    y_test, # list[num]: Test labels.
+    activation_potentials, # float: Input activation potential
+    show_metrices, # (bool): (True or False)
+    MW # list[list[num]]: Weight matrix of the neural network.
+) -> tuple:
+    infoTestModel = """
+    Tests the neural network model with the given test data.
+
+    Args:
+    x_test (list[num]): Test input data.
+    y_test (list[num]): Test labels.
+    activation_potential (float): Input activation potential
+    show_metrices (bool): (True or False)
+    MW (list(list[num])): Multiple Weight matrix list of the neural network. (Multiple model testing)
+
+    Returns:
+    tuple: A tuple containing the predicted labels and the accuracy of the model.
+    """
+
+    layers = ['fex','cat']
+
+    try:
+        acc_list = []
+        print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
+        start_time = time.time()
+        true = 0
+        for inpIndex,Input in enumerate(x_test):
+
+            output_layer = 0
+
+            for m, Model in enumerate(MW):
+
+                W = Model
+
+                Wc = [0] * len(W) # Wc = weight copy
+
+                y_preds = [None] * len(y_test)
+                for i, w in enumerate(W):
+                    Wc[i] = np.copy(w)
+
+                Input = np.array(Input)
+                Input = Input.ravel()
+                uni_start_time = time.time()
+                neural_layer = Input
+
+                for index, Layer in enumerate(layers):
+
+                    neural_layer = normalization(neural_layer)
+
+                    if layers[index] == 'fex':
+                        neural_layer = fex(neural_layer, W[index], activation_potentials[m], None, False, None)
+                    if layers[index] == 'cat':
+                        neural_layer = cat(neural_layer, W[index], False, None)
+
+                output_layer += neural_layer
+
+                for i, w in enumerate(Wc):
+                    W[i] = np.copy(w)
+            for i, w in enumerate(Wc):
+                W[i] = np.copy(w)
+            RealOutput = np.argmax(y_test[inpIndex])
+            PredictedOutput = np.argmax(output_layer)
+            if RealOutput == PredictedOutput:
+                true += 1
+            acc = true / len(y_test)
+            if show_metrices == True:
+
+                acc_list.append(acc)
+
+            y_preds[inpIndex] = PredictedOutput
+
+            uni_end_time = time.time()
+
+            calculating_est = round((uni_end_time - uni_start_time) * (len(x_test) - inpIndex),3)
+
+            if calculating_est < 60:
+                print('\rest......(sec):',calculating_est,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+            elif calculating_est > 60 and calculating_est < 3600:
+                print('\rest......(min):',calculating_est/60,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+            elif calculating_est > 3600:
+                print('\rest......(h):',calculating_est/3600,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+        if show_metrices == True:
+
+            plot_evaluate(y_test, y_preds, acc_list)
+
+        EndTime = time.time()
+        for i, w in enumerate(Wc):
+            W[i] = np.copy(w)
+
+        calculating_est = round(EndTime - start_time,2)
+
+        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
+
+        if calculating_est < 60:
+            print('Total testing time(sec): ',calculating_est)
+
+        elif calculating_est > 60 and calculating_est < 3600:
+            print('Total testing time(min): ',calculating_est/60)
+
+        elif calculating_est > 3600:
+            print('Total testing time(h): ',calculating_est/3600)
+
+        if acc >= 0.8:
+            print(Fore.GREEN + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+        elif acc < 0.8 and acc > 0.6:
+            print(Fore.MAGENTA + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+        elif acc <= 0.6:
+            print(Fore.RED+ '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+
+
+    except:
+
+        print(Fore.RED + "ERROR: Testing model parameters like 'activation_potential' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
+        return 'e'
+
+
+
+    return W,y_preds,acc

 def save_model(model_name,
     model_type,
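The new multiple_evaluate() runs the same fex/cat forward pass once per model, accumulates output_layer += neural_layer across models, and takes the argmax of the summed ensemble output, so each weight list in MW must be paired by index with its own activation potential. A usage sketch with hypothetical names (W_a and W_b standing for weights returned by two separate fit() runs, potentials as example values):

    MW = [W_a, W_b]
    activation_potentials = [0.4, 0.6]  # one per model, index-aligned with MW
    W, y_preds, acc = multiple_evaluate(x_test, y_test, activation_potentials, False, MW)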
@@ -560,6 +678,7 @@ def save_model(model_name,
     weights_type,
     weights_format,
     model_path,
+    scaler,
     W
 ):

@@ -575,6 +694,7 @@ def save_model(model_name,
     weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
     WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
     model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
+    scaler (bool): trained data it used standard_scaler ? (True or False)
     W: Weights list of the model.

     Returns:
@@ -584,7 +704,7 @@ def save_model(model_name,
     # Operations to be performed by the function will be written here
     pass

-    layers = ['fex','cat']
+    layers = ['fex','cat']

     if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
         print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" + infosave_model + Style.RESET_ALL)
@@ -621,7 +741,8 @@ def save_model(model_name,
         'SAVE DATE': datetime.now(),
         'WEIGHTS TYPE': weights_type,
         'WEIGHTS FORMAT': weights_format,
-        'MODEL PATH': model_path
+        'MODEL PATH': model_path,
+        'STANDARD SCALER': scaler
     }
     try:

@@ -735,17 +856,11 @@ def load_model(model_name,
         print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" + infoload_model + Style.RESET_ALL)

     model_name = str(df['MODEL NAME'].iloc[0])
-    layers = df['LAYERS'].tolist()
     layer_count = int(df['LAYER COUNT'].iloc[0])
-    class_count = int(df['CLASS COUNT'].iloc[0])
     activation_potential = int(df['ACTIVATION POTENTIAL'].iloc[0])
-    NeuronCount = int(df['NEURON COUNT'].iloc[0])
-    SynapseCount = int(df['SYNAPSE COUNT'].iloc[0])
-    test_acc = int(df['TEST ACCURACY'].iloc[0])
-    model_type = str(df['MODEL TYPE'].iloc[0])
     WeightType = str(df['WEIGHTS TYPE'].iloc[0])
-    WeightFormat = str(df['WEIGHTS FORMAT'].iloc[0])
     model_path = str(df['MODEL PATH'].iloc[0])
+

     W = [0] * layer_count

@@ -761,9 +876,9 @@ def load_model(model_name,
     else:
         raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
     print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
-    return W,activation_potential,df
+    return W, activation_potential, df

-def predict_model_ssd(Input,model_name,model_path):
+def predict_model_ssd(Input, model_name, model_path):

     infopredict_model_ssd = """
     Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
@@ -775,7 +890,13 @@ def predict_model_ssd(Input,model_name,model_path):
     Returns:
     ndarray: Output from the model.
     """
-    W,activation_potential = load_model(model_name,model_path)
+    W, activation_potential, df = load_model(model_name,model_path)
+
+    scaler = str(df['STANDARD SCALER'].iloc[0])
+
+    if scaler == 'True':
+
+        Input = standard_scaler(Input, None)

     layers = ['fex','cat']

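This closes the loop on the new scaler flag: save_model() persists it as the 'STANDARD SCALER' metadata column (earlier hunk), and predict_model_ssd() reads it back and rescales the raw input before propagation. One behavior worth noting from the standard_scaler() implementation added later in this diff: standard_scaler(Input, None) derives the mean and std from the prediction input itself, not from stored training statistics. The round trip as implemented:

    W, activation_potential, df = load_model(model_name, model_path)
    if str(df['STANDARD SCALER'].iloc[0]) == 'True':
        Input = standard_scaler(Input, None)  # stats computed from Input itself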
@@ -791,9 +912,9 @@ def predict_model_ssd(Input,model_name,model_path):
             neural_layer = normalization(neural_layer)

             if layers[index] == 'fex':
-                neural_layer = fex(neural_layer, W[index],
+                neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)
             if layers[index] == 'cat':
-                neural_layer = cat(neural_layer, W[index],
+                neural_layer = cat(neural_layer, W[index], False, None)
     except:
         print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" + infopredict_model_ssd + Style.RESET_ALL)
         return 'e'
@@ -802,7 +923,7 @@ def predict_model_ssd(Input,model_name,model_path):
     return neural_layer


-def predict_model_ram(Input,activation_potential,W):
+def predict_model_ram(Input, activation_potential, scaler, W):

     infopredict_model_ram = """
     Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
@@ -811,11 +932,15 @@ def predict_model_ram(Input,activation_potential,W):
     Arguments:
     Input (list or ndarray): Input data for the model (single vector or single matrix).
     activation_potential (float): Activation potential.
+    scaler (bool): trained data it used standard_scaler ? (True or False)
     W (list of ndarrays): Weights of the model.

     Returns:
     ndarray: Output from the model.
     """
+    if scaler == True:
+
+        Input = standard_scaler(Input, None)

     layers = ['fex','cat']

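predict_model_ram() takes the flag directly instead of reading it from saved metadata, and the new third positional argument shifts W to fourth place, so 2.1.1-style calls of the form predict_model_ram(Input, activation_potential, W) must be updated. A minimal sketch:

    output = predict_model_ram(sample, activation_potential, True, trained_W)
    predicted_class = np.argmax(output)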
@@ -831,9 +956,9 @@ def predict_model_ram(Input,activation_potential,W):
             neural_layer = normalization(neural_layer)

             if layers[index] == 'fex':
-                neural_layer = fex(neural_layer, W[index],
+                neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)
             if layers[index] == 'cat':
-                neural_layer = cat(neural_layer, W[index],
+                neural_layer = cat(neural_layer, W[index], False, None)

     except:
         print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + infopredict_model_ram + Style.RESET_ALL)
@@ -843,117 +968,419 @@ def predict_model_ram(Input,activation_potential,W):
     return neural_layer


-
-
-
+
+def auto_balancer(x_train, y_train):
+
+    infoauto_balancer = """
     Function to balance the training data across different classes.

     Arguments:
     x_train (list): Input data for training.
     y_train (list): Labels corresponding to the input data.
-    class_count (int): Number of classes.

     Returns:
     tuple: A tuple containing balanced input data and labels.
     """
-
-
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    try:
+        ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[
+            0] for i in range(class_count)}
         classes = [len(ClassIndices[i]) for i in range(class_count)]
-
+
         if len(set(classes)) == 1:
-            print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer"
+            print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer" + Style.RESET_ALL)
             return x_train, y_train
-
+
         MinCount = min(classes)
-
+
         BalancedIndices = []
         for i in range(class_count):
             if len(ClassIndices[i]) > MinCount:
-                SelectedIndices = np.random.choice(
+                SelectedIndices = np.random.choice(
+                    ClassIndices[i], MinCount, replace=False)
             else:
                 SelectedIndices = ClassIndices[i]
             BalancedIndices.extend(SelectedIndices)
-
+
         BalancedInputs = [x_train[idx] for idx in BalancedIndices]
         BalancedLabels = [y_train[idx] for idx in BalancedIndices]
-
-        print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)
-
+
+        print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)
+            ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
+    except:
         print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
         return 'e'
-
-
-
-
+
+    return BalancedInputs, BalancedLabels
+
+
+def synthetic_augmentation(x_train, y_train):
     """
     Generates synthetic examples to balance classes with fewer examples.
-
+
     Arguments:
     x -- Input dataset (examples) - list format
     y -- Class labels (one-hot encoded) - list format
-
-
+
     Returns:
     x_balanced -- Balanced input dataset (list format)
     y_balanced -- Balanced class labels (one-hot encoded, list format)
     """
+    x = x_train
+    y = y_train
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
     # Calculate class distribution
     class_distribution = {i: 0 for i in range(class_count)}
     for label in y:
         class_distribution[np.argmax(label)] += 1
-
+
     max_class_count = max(class_distribution.values())
-
+
     x_balanced = list(x)
     y_balanced = list(y)
-
+
     for class_label in range(class_count):
-        class_indices = [i for i, label in enumerate(
+        class_indices = [i for i, label in enumerate(
+            y) if np.argmax(label) == class_label]
         num_samples = len(class_indices)
-
+
         if num_samples < max_class_count:
             while num_samples < max_class_count:
-
-                random_indices = np.random.choice(
+
+                random_indices = np.random.choice(
+                    class_indices, 2, replace=False)
                 sample1 = x[random_indices[0]]
                 sample2 = x[random_indices[1]]
-
-
-
-
+
+                synthetic_sample = sample1 + \
+                    (np.array(sample2) - np.array(sample1)) * np.random.rand()
+
                 x_balanced.append(synthetic_sample.tolist())
-                y_balanced.append(y[class_indices[0]])
-
+                y_balanced.append(y[class_indices[0]])
+
                 num_samples += 1
-
+
     return np.array(x_balanced), np.array(y_balanced)

+
 def standard_scaler(x_train, x_test):
-
-    Standardizes training and test datasets.
+    info_standard_scaler = """
+    Standardizes training and test datasets. x_test may be None.

     Args:
     train_data: numpy.ndarray
-    Training data
+        Training data
     test_data: numpy.ndarray
-    Test data
+        Test data

     Returns:
     tuple
     Standardized training and test datasets
     """
-
-
-
+    try:
+
+        mean = np.mean(x_train, axis=0)
+        std = np.std(x_train, axis=0)
+
+        if x_test == None:
+
+            train_data_scaled = (x_train - mean) / std
+            return train_data_scaled
+
+        else:
+            train_data_scaled = (x_train - mean) / std
+            test_data_scaled = (x_test - mean) / std
+            return train_data_scaled, test_data_scaled
+
+    except:
+        print(
+            Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
+
+
+def encode_one_hot(y_train, y_test):
+    info_one_hot_encode = """
+    Performs one-hot encoding on y_train and y_test data..
+
+    Args:
+    y_train (numpy.ndarray): Training label data.
+    y_test (numpy.ndarray): Test label data.
+
+    Returns:
+    tuple: One-hot encoded y_train and y_test data.
+    """
+    try:
+        classes = np.unique(y_train)
+        class_count = len(classes)
+
+        class_to_index = {cls: idx for idx, cls in enumerate(classes)}
+
+        y_train_encoded = np.zeros((y_train.shape[0], class_count))
+        for i, label in enumerate(y_train):
+            y_train_encoded[i, class_to_index[label]] = 1
+
+        y_test_encoded = np.zeros((y_test.shape[0], class_count))
+        for i, label in enumerate(y_test):
+            y_test_encoded[i, class_to_index[label]] = 1
+    except:
+        print(Fore.RED + 'ERROR: y_train and y_test must be numpy array. from: one_hot_encode' + info_one_hot_encode)
+
+    return y_train_encoded, y_test_encoded
+
+
+def split(X, y, test_size=0.2, random_state=None):
+    """
+    Splits the given X (features) and y (labels) data into training and testing subsets.
+
+    Args:
+    X (numpy.ndarray): Features data.
+    y (numpy.ndarray): Labels data.
+    test_size (float or int): Proportion or number of samples for the test subset.
+    random_state (int or None): Seed for random state.
+
+    Returns:
+    tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
+    """
+    # Size of the dataset
+    num_samples = X.shape[0]
+
+    if isinstance(test_size, float):
+        test_size = int(test_size * num_samples)
+    elif isinstance(test_size, int):
+        if test_size > num_samples:
+            raise ValueError(
+                "test_size cannot be larger than the number of samples.")
+    else:
+        raise ValueError("test_size should be float or int.")
+
+    if random_state is not None:
+        np.random.seed(random_state)
+
+    indices = np.arange(num_samples)
+    np.random.shuffle(indices)
+
+    test_indices = indices[:test_size]
+    train_indices = indices[test_size:]
+
+    x_train, x_test = X[train_indices], X[test_indices]
+    y_train, y_test = y[train_indices], y[test_indices]
+
+    return x_train, x_test, y_train, y_test
+
+
+def metrics(y_ts, test_preds):
+    """
+    Calculates precision, recall and F1 score for a classification task.
+
+    Args:
+    y_test (list or numpy.ndarray): True labels.
+    test_preds (list or numpy.ndarray): Predicted labels.
+
+    Returns:
+    tuple: Precision, recall, F1 score.
+    """
+    y_test_d = decode_one_hot(y_ts)
+    y_test_d = np.array(y_test_d)
+    y_pred = np.array(test_preds)
+
+    if y_test_d.ndim > 1:
+        y_test_d = y_test_d.reshape(-1)
+    if y_pred.ndim > 1:
+        y_pred = y_pred.reshape(-1)
+
+    tp = {}
+    fp = {}
+    fn = {}
+
+    classes = np.unique(np.concatenate((y_test_d, y_pred)))
+
+    for c in classes:
+        tp[c] = 0
+        fp[c] = 0
+        fn[c] = 0
+
+    for c in classes:
+        for true, pred in zip(y_test_d, y_pred):
+            if true == c and pred == c:
+                tp[c] += 1
+            elif true != c and pred == c:
+                fp[c] += 1
+            elif true == c and pred != c:
+                fn[c] += 1
+
+    precision = {}
+    recall = {}
+    f1 = {}
+
+    for c in classes:
+        precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
+        recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
+        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] +
+            recall[c]) if (precision[c] + recall[c]) > 0 else 0
+
+    micro_precision = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(
+        list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
+    micro_recall = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(
+        fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
+    micro_f1 = 2 * (micro_precision * micro_recall) / (micro_precision +
+        micro_recall) if (micro_precision + micro_recall) > 0 else 0
+
+    return micro_precision, micro_recall, micro_f1
+
+
+def decode_one_hot(encoded_data):
+    """
+    Decodes one-hot encoded data to original categorical labels.
+
+    Args:
+    encoded_data (numpy.ndarray): One-hot encoded data with shape (n_samples, n_classes).
+
+    Returns:
+    numpy.ndarray: Decoded categorical labels with shape (n_samples,).
+    """
+
+    decoded_labels = np.argmax(encoded_data, axis=1)
+
+    return decoded_labels
+
+
+def roc_curve(y_true, y_score):
+    """
+    Computes ROC curve.
+
+    Args:
+    y_true (numpy.ndarray): True class labels (binary: 0 or 1).
+    y_score (numpy.ndarray): Predicted probabilities for positive class.
+
+    Returns:
+    tuple: FPR (False Positive Rate), TPR (True Positive Rate), thresholds.
+    """
+
+    idx = np.argsort(y_score)[::-1]
+    y_score_sorted = y_score[idx]
+    y_true_sorted = y_true[idx]
+
+    tpr = []
+    fpr = []
+    thresholds = np.linspace(0, 1, 100)
+
+    for threshold in thresholds:
+        y_pred_binary = np.where(y_score_sorted >= threshold, 1, 0)
+
+        tp = np.sum((y_true_sorted == 1) & (y_pred_binary == 1))
+        fn = np.sum((y_true_sorted == 1) & (y_pred_binary == 0))
+        tn = np.sum((y_true_sorted == 0) & (y_pred_binary == 0))
+        fp = np.sum((y_true_sorted == 0) & (y_pred_binary == 1))
+
+        # Check for division by zero
+        if (tp + fn) == 0:
+            tpr_value = 0.0
+        else:
+            tpr_value = tp / (tp + fn)
+
+        if (fp + tn) == 0:
+            fpr_value = 0.0
+        else:
+            fpr_value = fp / (fp + tn)
+
+        tpr.append(tpr_value)
+        fpr.append(fpr_value)
+
+    return fpr, tpr, thresholds
+
+
+def confusion_matrix(y_true, y_pred, class_count):
+    """
+    Computes confusion matrix.
+
+    Args:
+    y_true (numpy.ndarray): True class labels (1D array).
+    y_pred (numpy.ndarray): Predicted class labels (1D array).
+    num_classes (int): Number of classes.
+
+    Returns:
+    numpy.ndarray: Confusion matrix of shape (num_classes, num_classes).
+    """
+    confusion = np.zeros((class_count, class_count), dtype=int)
+
+    for i in range(len(y_true)):
+        true_label = y_true[i]
+        pred_label = y_pred[i]
+        confusion[true_label, pred_label] += 1
+
+    return confusion
+
+
+def plot_evaluate(y_test, y_preds, acc_list):
+
+    acc = acc_list[len(acc_list) - 1]
+    y_true = decode_one_hot(y_test)
+
+    y_true = np.array(y_true)
+    y_preds = np.array(y_preds)
+    fpr, tpr, thresholds = roc_curve(y_true, y_preds)
+    precision, recall, f1 = metrics(y_test, y_preds)
+    Class = np.unique(y_test)
+
+
+    cm = confusion_matrix(y_true, y_preds, len(Class))
+
+    fig, axs = plt.subplots(2, 2, figsize=(16, 12))
+
+    # Confusion Matrix
+    sns.heatmap(cm, annot=True, fmt='d', ax=axs[0, 0])
+    axs[0, 0].set_title("Confusion Matrix")
+    axs[0, 0].set_xlabel("Predicted Class")
+    axs[0, 0].set_ylabel("Actual Class")
+
+    # ROC Curve
+    axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve')
+    axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+    axs[1, 0].set_xlim([0.0, 1.0])
+    axs[1, 0].set_ylim([0.0, 1.05])
+    axs[1, 0].set_xlabel('False Positive Rate')
+    axs[1, 0].set_ylabel('True Positive Rate')
+    axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+    axs[1, 0].legend(loc="lower right")
+
+    # Precision, Recall, F1 Score, Accuracy
+    metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
+    values = [precision, recall, f1, acc]
+    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']
+
+
+    bars = axs[0, 1].bar(metric, values, color=colors)
+
+
+    for bar, value in zip(bars, values):
+        axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
+            ha='center', va='bottom', fontsize=12, color='white', weight='bold')
+
+    axs[0, 1].set_ylim(0, 1)
+    axs[0, 1].set_xlabel('Metrics')
+    axs[0, 1].set_ylabel('Score')
+    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy')
+    axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)

+
+    plt.plot(acc_list, marker='o', linestyle='-',
+        color='r', label='Accuracy')

-    train_data_scaled = (x_train - mean) / std
-    test_data_scaled = (x_test - mean) / std

-
-
+    plt.axhline(y=1, color='g', linestyle='--', label='Maximum Accuracy')
+
+
+    plt.xlabel('Samples')
+    plt.ylabel('Accuracy')
+    plt.title('Accuracy History')
+    plt.legend()

-
+
+    plt.tight_layout()
+    plt.show()

 def get_weights():

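Taken together, this hunk turns plan_bi into a small self-contained toolkit: data splitting (split), one-hot encoding and decoding (encode_one_hot, decode_one_hot), class balancing (auto_balancer, synthetic_augmentation), scaling (standard_scaler), and evaluation (metrics, roc_curve, confusion_matrix, plot_evaluate). A minimal end-to-end sketch using only names added or changed in this diff; fit()'s exact signature is assumed from the 2.x series, and activation_potential is an example value:

    x_train, x_test, y_train, y_test = split(X, y, test_size=0.2, random_state=42)
    y_train, y_test = encode_one_hot(y_train, y_test)
    x_train, y_train = auto_balancer(x_train, y_train)

    activation_potential = 0.5
    trained_W = fit(x_train, y_train, activation_potential)
    W, y_preds, acc = evaluate(x_test, y_test, activation_potential, True, trained_W)
    precision, recall, f1 = metrics(y_test, y_preds)  # micro-averaged

Two caveats visible in the code itself: roc_curve() expects binary labels, but plot_evaluate() feeds it argmax class indices, so the ROC panel is only meaningful for two-class problems; and standard_scaler() tests "if x_test == None:", which with an ndarray argument triggers an elementwise comparison on recent NumPy and falls into the except branch, so the two-array path may not behave as intended ("is None" would be the conventional test).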
@@ -973,4 +1400,5 @@ def get_acc():

 def get_pot():

-    return 1
+    return 1
+