pyerualjetwork 2.6.6__py3-none-any.whl → 2.7.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plan/plan.py +129 -141
- {pyerualjetwork-2.6.6.dist-info → pyerualjetwork-2.7.8.dist-info}/METADATA +2 -2
- pyerualjetwork-2.7.8.dist-info/RECORD +6 -0
- {pyerualjetwork-2.6.6.dist-info → pyerualjetwork-2.7.8.dist-info}/WHEEL +1 -1
- pyerualjetwork-2.6.6.dist-info/RECORD +0 -6
- {pyerualjetwork-2.6.6.dist-info → pyerualjetwork-2.7.8.dist-info}/top_level.txt +0 -0
plan/plan.py
CHANGED
@@ -1,4 +1,3 @@
-
 # -*- coding: utf-8 -*-
 """
 Created on Tue Jun 18 23:32:16 2024
@@ -11,7 +10,6 @@ import numpy as np
 import time
 from colorama import Fore, Style
 from typing import List, Union
-import math
 from scipy.special import expit, softmax
 import matplotlib.pyplot as plt
 import seaborn as sns
@@ -23,12 +21,13 @@ from tqdm import tqdm
 def fit(
     x_train: List[Union[int, float]],
     y_train: List[Union[int, float]], # At least two.. and one hot encoded
-    show_training,
-    show_count= None,
     val= None,
+    val_count = None,
+    activation_potentiation=None, # (float): Input activation_potentiation (optional)
     x_val= None,
     y_val= None,
-
+    show_training = None,
+    show_count= None
 ) -> str:

     infoPLAN = """
@@ -37,33 +36,53 @@ def fit(
     Args:
         x_train (list[num]): List of input data.
         y_train (list[num]): List of target labels. (one hot encoded)
-
-        show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
-        val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None
+        val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None (optional)
         val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
-        x_val (list[num]): List of validation data. (optional) Default: x_train
-        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: y_train
         activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
+        x_val (list[num]): List of validation data. (optional) Default: 1% of x_train (auto_balanced) it means every %1 of train progress starts validation
+        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: 1% of y_train (auto_balanced) it means every %1 of train progress starts validation
+        show_training (bool, str): True, None or'final'
+        show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
     Returns:
         list([num]): (Weight matrices list, train_predictions list, Train_acc).
         error handled ?: Process status ('e')
     """
-

     if len(x_train) != len(y_train):

-        print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
+        print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN + Style.RESET_ALL)
         return 'e'
-
-    if x_val == None and y_val == None:
-
-        x_val = x_train
-        y_val = y_train

     if val == True or val == 'final':
+
+        try:
+
+            if x_val == None and y_val == None:
+
+                x_train, x_val, y_train, y_val = split(x_train, y_train, test_size=0.1, random_state=42)
+
+                x_train, y_train = auto_balancer(x_train, y_train)
+                x_val, y_val = auto_balancer(x_val, y_val)
+
+        except:
+            pass
+
+        if val == True:
+
+            if val_count == None:
+
+                val_count = 0.01
+
+            v_iter = 0
+
+        if val == 'final':
+
+            val_count = 0.99

-
-
+        val_count = int(len(x_train) * val_count)
+        val_count_copy = val_count
+        val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
+        val_list = [] * val_count

     if show_count == None:

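Not part of the diff: a hypothetical sketch of how the reworked `fit` signature might be called after this change. The dataset, its shapes, and the `from plan import plan` import path are invented for illustration (the wheel ships a `plan` package containing `plan.py`, but the exported names are an assumption):

```python
import numpy as np
from plan import plan  # assumed import path for the packaged plan/plan.py

# Invented toy dataset: 100 samples, 4 features, 2 one-hot encoded classes.
x = np.random.rand(100, 4).tolist()
y = np.eye(2)[np.random.randint(0, 2, 100)].tolist()

# Display arguments (show_training, show_count) moved to the end and became
# optional keywords; validation controls now follow y_train directly.
W = plan.fit(x, y, val=True)

# Validation cadence under the new defaults: val_count=0.01 becomes
# int(len(x_train) * 0.01) samples between validation passes, i.e. roughly
# every 1% of training; val='final' uses 0.99, firing once near the end.
```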
@@ -85,7 +104,6 @@ def fit(

    neurons = [len(class_count), len(class_count)]
    layers = ['fex']
-    val_list = []

    x_train[0] = np.array(x_train[0])
    x_train[0] = x_train[0].ravel()
@@ -94,12 +112,14 @@ def fit(
    STPW = weight_identification(
        len(layers) - 1, len(class_count), neurons, x_train_size) # STPW = SHORT TIME POTENTIATION WEIGHT

-    LTPW = [
+    LTPW = [0] * len(STPW) # LTPW = LONG TIME POTENTIATION WEIGHT

    y = decode_one_hot(y_train)

+    train_progress = tqdm(total=len(x_train),leave=False, desc="Training",ncols= 120)
+
    for index, inp in enumerate(x_train):
-
+
        progress = index / len(x_train) * 100

        inp = np.array(inp)
@@ -114,8 +134,6 @@ def fit(

        for Lindex, Layer in enumerate(layers):

-            neural_layer = normalization(neural_layer)
-
            if Layer == 'fex':
                STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation)

@@ -123,31 +141,28 @@ def fit(
            LTPW[i] = LTPW[i] + w


-        if val == True:
+        if val == True and index == val_count:

-            if show_training == True:
-                try:
-                    plt.close(fig)

-
-                    pass
+            val_count += val_count_copy

-            validation_model = evaluate(x_val, y_val,
+            validation_model = evaluate(x_val, y_val, LTPW, activation_potentiation, None)

            val_acc = validation_model[get_acc()]

            val_list.append(val_acc)

-            if
+            if v_iter == 0:

                val_bar.update(val_acc)
-
-            if index != 0:

-
+
+            if v_iter != 0:

+                val_acc = val_acc - val_list[v_iter - 1]
                val_bar.update(val_acc)

+            v_iter += 1

        if show_training == True:

@@ -192,17 +207,18 @@ def fit(
                title_info = 'Weight Matrix Of Fex Layer'


-
+

                progress_status = f"{progress:.1f}"
                fig.suptitle(suptitle_info + progress_status)
                plt.draw()
                plt.pause(0.1)

-
-
+
+        STPW = weight_identification(
            len(layers) - 1, len(class_count), neurons, x_train_size)

+        train_progress.update(1)

    if show_training == 'final':

@@ -227,15 +243,16 @@ def fit(

    if val == 'final':

-        validation_model = evaluate(x_val, y_val,
+        validation_model = evaluate(x_val, y_val, LTPW, activation_potentiation, bar_status=None, show_metrices=None)

        val_acc = validation_model[get_acc()]

        val_list.append(val_acc)

        val_bar.update(val_acc)
-
-
+
+    LTPW = normalization(LTPW)
+
    return LTPW

 # FUNCTIONS -----
@@ -449,10 +466,10 @@ def Relu(
 def evaluate(
     x_test, # list[num]: Test input data.
     y_test, # list[num]: Test labels.
-    show_metrices, # show_metrices (bool): (True or False)
     W, # list[num]: Weight matrix list of the neural network.
-    activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional)
-
+    activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+    bar_status=True, # bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
+    show_metrices=None # show_metrices (bool): (True or None) (optional) Default: None
 ) -> tuple:
     infoTestModel = """
     Tests the neural network model with the given test data.
@@ -460,18 +477,17 @@ def evaluate(
     Args:
         x_test (list[num]): Test input data.
         y_test (list[num]): Test labels.
-        show_metrices (bool): (True or False)
         W (list[num]): Weight matrix list of the neural network.
-        activation_potentiation (float or None): Threshold value for comparison. (optional)
-
+        activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+        bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
+        show_metrices (bool): (True or None) (optional) Default: None

     Returns:
         tuple: A tuple containing the predicted labels and the accuracy of the model.
     """
-
-    layers = ['fex']
-
     try:
+        layers = ['fex']
+
         Wc = [0] * len(W) # Wc = Weight copy
         true = 0
         y_preds = [-1] * len(y_test)
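Not part of the diff: a hypothetical call-site sketch for the reworked `evaluate` signature (variable names invented). `show_metrices` is no longer a required positional argument, and the new `bar_status` flag toggles the tqdm bars:

```python
# x_test, y_test, and W are assumed to come from the caller's pipeline.
test_model = plan.evaluate(x_test, y_test, W,
                           activation_potentiation=None,
                           bar_status=True,     # tqdm 'Testing' / 'Test Accuracy' bars
                           show_metrices=None)  # pass True to plot metrics

W_out, y_preds, acc = test_model  # the function returns W, y_preds, acc
```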
@@ -479,25 +495,25 @@ def evaluate(

        for i, w in enumerate(W):
            Wc[i] = np.copy(w)
-            print('\rCopying weights.....', i+1, '/', len(W), end="")

-
-        if
-
+
+        if bar_status == True:
+
+            test_progress = tqdm(total=len(x_test),leave=False, desc='Testing',ncols=120)
+            acc_bar = tqdm(total=1, desc="Test Accuracy", ncols=120)
+
+
        for inpIndex, Input in enumerate(x_test):
            Input = np.array(Input)
            Input = Input.ravel()
-            uni_start_time = time.time()
            neural_layer = Input
-
+
            for index, Layer in enumerate(layers):
-
-                neural_layer = normalization(neural_layer)
-
+
                if Layer == 'fex':
                    neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
-
-
+
+
            for i, w in enumerate(Wc):
                W[i] = np.copy(w)
            RealOutput = np.argmax(y_test[inpIndex])
@@ -505,31 +521,30 @@ def evaluate(
            if RealOutput == PredictedOutput:
                true += 1
            acc = true / len(y_test)
-
-
+
+
            acc_list.append(acc)
            y_preds[inpIndex] = PredictedOutput

-            if
+            if bar_status == True:
+                test_progress.update(1)
                if inpIndex == 0:
                    acc_bar.update(acc)

                else:
                    acc = acc - acc_list[inpIndex - 1]
                    acc_bar.update(acc)
-
-        if show_metrices == True:
+
+        if show_metrices == True:
            plot_evaluate(y_test, y_preds, acc_list)


        for i, w in enumerate(Wc):
            W[i] = np.copy(w)
-
+
    except:
-
-        print(Fore.RED +
-              infoTestModel + Style.RESET_ALL)
-        return 'e'
+
+        print(Fore.RED + 'ERROR:' + infoTestModel + Style.RESET_ALL)

    return W, y_preds, acc

@@ -902,7 +917,7 @@ def predict_model_ssd(Input, model_name, model_path):

    except:

-
+        pass


    layers = ['fex']
@@ -915,9 +930,7 @@ def predict_model_ssd(Input, model_name, model_path):
    neural_layer = np.array(neural_layer)
    neural_layer = neural_layer.ravel()
    for index, Layer in enumerate(layers):
-
-        neural_layer = normalization(neural_layer)
-
+
        if Layer == 'fex':
            neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
        elif Layer == 'cat':
@@ -931,7 +944,7 @@ def predict_model_ssd(Input, model_name, model_path):
    return neural_layer


-def predict_model_ram(Input,
+def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=None):

    infopredict_model_ram = """
    Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
@@ -939,17 +952,19 @@ def predict_model_ram(Input, scaler_params, W, activation_potentiation=None):

    Arguments:
        Input (list or ndarray): Input data for the model (single vector or single matrix).
-        scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
        W (list of ndarrays): Weights of the model.
-
+        scaler_params (int, float): standard scaler params list: mean,std. (optional) Default: None.
+        activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None

    Returns:
        ndarray: Output from the model.
    """
-
-
+    try:
+        if scaler_params != None:

-
+            Input = standard_scaler(None, Input, scaler_params)
+    except:
+        Input = standard_scaler(None, Input, scaler_params)

    layers = ['fex']

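Not part of the diff: a hypothetical sketch of the call-site impact (names invented). `W` moved from third to second position and `scaler_params` became an optional keyword, so unscaled prediction no longer needs a placeholder argument:

```python
# Old order: predict_model_ram(Input, scaler_params, W, activation_potentiation=None)
# New order: predict_model_ram(Input, W, scaler_params=None, activation_potentiation=None)
out = plan.predict_model_ram(sample, W)                        # no scaling
out = plan.predict_model_ram(sample, W, scaler_params=params)  # reuse saved mean/std
```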
@@ -962,8 +977,6 @@ def predict_model_ram(Input, scaler_params, W, activation_potentiation=None):
    neural_layer = neural_layer.ravel()
    for index, Layer in enumerate(layers):

-        neural_layer = normalization(neural_layer)
-
        if Layer == 'fex':
            neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
        elif Layer == 'cat':
@@ -1005,7 +1018,7 @@ def auto_balancer(x_train, y_train):
        MinCount = min(classes)

        BalancedIndices = []
-        for i in range(class_count):
+        for i in tqdm(range(class_count),leave=False,desc='Balancing Data',ncols=120):
            if len(ClassIndices[i]) > MinCount:
                SelectedIndices = np.random.choice(
                    ClassIndices[i], MinCount, replace=False)
@@ -1022,7 +1035,7 @@ def auto_balancer(x_train, y_train):
        print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
        return 'e'

-    return BalancedInputs, BalancedLabels
+    return np.array(BalancedInputs), np.array(BalancedLabels)


 def synthetic_augmentation(x_train, y_train):
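Not part of the diff: a hypothetical one-liner (names invented) showing why the return-type change matters downstream:

```python
bx, by = plan.auto_balancer(x, y)
# bx and by are now numpy arrays, so e.g. bx.shape works without an
# extra np.array(bx) conversion at the call site.
```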
@@ -1052,7 +1065,7 @@ def synthetic_augmentation(x_train, y_train):
    x_balanced = list(x)
    y_balanced = list(y)

-    for class_label in range(class_count):
+    for class_label in tqdm(range(class_count), leave=False, desc='Augmenting Data',ncols= 120):
        class_indices = [i for i, label in enumerate(
            y) if np.argmax(label) == class_label]
        num_samples = len(class_indices)
@@ -1076,7 +1089,7 @@ def synthetic_augmentation(x_train, y_train):
    return np.array(x_balanced), np.array(y_balanced)


-def standard_scaler(x_train, x_test, scaler_params=None):
+def standard_scaler(x_train, x_test=None, scaler_params=None):
    info_standard_scaler = """
    Standardizes training and test datasets. x_test may be None.
@@ -1084,7 +1097,7 @@ def standard_scaler(x_train, x_test, scaler_params=None):
    train_data: numpy.ndarray
        Training data
    test_data: numpy.ndarray
-        Test data
+        Test data (optional)

    Returns:
        list:
@@ -1092,6 +1105,15 @@ def standard_scaler(x_train, x_test, scaler_params=None):
        tuple
            Standardized training and test datasets
    """
+    try:
+
+        x_train = x_train.tolist()
+        x_test = x_test.tolist()
+
+    except:
+
+        pass
+
    try:

        if scaler_params == None and x_test != None:
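Not part of the diff: the inference-time pattern this default enables is already visible in `predict_model_ram` above; a hypothetical standalone sketch (variable names invented):

```python
# With x_test=None now the default, a scaler fitted at training time can be
# replayed on a single input by passing x_train=None plus the saved params,
# exactly as the new predict_model_ram body does.
scaled_input = plan.standard_scaler(None, single_input, scaler_params)
```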
@@ -1129,7 +1151,7 @@ def standard_scaler(x_train, x_test, scaler_params=None):

    except:
        print(
-            Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
+            Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler + Style.RESET_ALL)


 def encode_one_hot(y_train, y_test):
@@ -1143,21 +1165,18 @@ def encode_one_hot(y_train, y_test):
    Returns:
        tuple: One-hot encoded y_train ve y_test verileri.
    """
-
-
-        class_count = len(classes)
+    classes = np.unique(y_train)
+    class_count = len(classes)

-
+    class_to_index = {cls: idx for idx, cls in enumerate(classes)}

-
-
-
+    y_train_encoded = np.zeros((y_train.shape[0], class_count))
+    for i, label in enumerate(y_train):
+        y_train_encoded[i, class_to_index[label]] = 1

-
-
-
-    except:
-        print(Fore.RED + 'ERROR: y_train and y_test must be numpy array. from: one_hot_encode' + info_one_hot_encode)
+    y_test_encoded = np.zeros((y_test.shape[0], class_count))
+    for i, label in enumerate(y_test):
+        y_test_encoded[i, class_to_index[label]] = 1

    return y_train_encoded, y_test_encoded

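Not part of the diff: a hypothetical walk-through of the rewritten encoder (inputs invented), following the `np.unique` / `class_to_index` logic shown in the added lines:

```python
import numpy as np

y_train = np.array([0, 2, 1, 0])
y_test = np.array([1, 2])

y_tr_oh, y_te_oh = plan.encode_one_hot(y_train, y_test)
# classes -> [0, 1, 2], so:
# y_tr_oh -> [[1,0,0], [0,0,1], [0,1,0], [1,0,0]]
# y_te_oh -> [[0,1,0], [0,0,1]]
```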
@@ -1175,7 +1194,6 @@ def split(X, y, test_size, random_state):
    Returns:
        tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
    """
-    # Size of the dataset
    num_samples = X.shape[0]

    if isinstance(test_size, float):
@@ -1392,7 +1410,6 @@ def plot_evaluate(y_test, y_preds, acc_list):

    # Confusion matrix
    cm = confusion_matrix(y_true, y_preds, len(Class))
-    # Subplot içinde düzenleme
    fig, axs = plt.subplots(2, 2, figsize=(16, 12))

    # Confusion Matrix
@@ -1511,16 +1528,19 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
    x_balanced -- Balanced input dataset (NumPy array format)
    y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
    """
-
-
-
+    try:
+        x_train = np.array(x_train)
+        y_train = np.array(y_train)
+    except:
+        print(Fore.GREEN + "x_tarin and y_train already numpyarray." + Style.RESET_ALL)
+        pass
    classes = np.arange(y_train.shape[1])
    class_count = len(classes)

    x_balanced = []
    y_balanced = []

-    for class_label in range(class_count):
+    for class_label in tqdm(range(class_count),leave=False, desc='Augmenting Data',ncols= 120):
        class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
        num_samples = len(class_indices)
@@ -1542,8 +1562,7 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
        additional_labels = np.zeros((samples_to_add, y_train.shape[1]))

        for i in range(samples_to_add):
-
-
+
            random_indices = np.random.choice(class_indices, 2, replace=False)
            sample1 = x_train[random_indices[0]]
            sample2 = x_train[random_indices[1]]
@@ -1554,41 +1573,10 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
            additional_samples[i] = synthetic_sample
            additional_labels[i] = y_train[class_indices[0]]

-
-            calculating_est = round(
-                (uni_end_time - uni_start_time) * (samples_to_add - i), 3)
-
-            if calculating_est < 60:
-                print('\rest......(sec):', calculating_est, '\n', end="")
-
-            elif calculating_est > 60 and calculating_est < 3600:
-                print('\rest......(min):', calculating_est/60, '\n', end="")
-
-            elif calculating_est > 3600:
-                print('\rest......(h):', calculating_est/3600, '\n', end="")
-
-            print('Augmenting: ', class_label, '/', class_count, '...', i, "/", samples_to_add, "\n", end="")
-
+
        x_balanced.append(additional_samples)
        y_balanced.append(additional_labels)
-
-
-    EndTime = time.time()
-
-    calculating_est = round(EndTime - start_time, 2)
-
-    print(Fore.GREEN + " \nBalancing Finished with 0 ERROR\n" + Style.RESET_ALL)
-
-    if calculating_est < 60:
-        print('Total balancing time(sec): ', calculating_est)
-
-    elif calculating_est > 60 and calculating_est < 3600:
-        print('Total balancing time(min): ', calculating_est/60)
-
-    elif calculating_est > 3600:
-        print('Total balancing time(h): ', calculating_est/3600)

-    # Stack the balanced arrays
    x_balanced = np.vstack(x_balanced)
    y_balanced = np.vstack(y_balanced)

{pyerualjetwork-2.6.6.dist-info → pyerualjetwork-2.7.8.dist-info}/METADATA
CHANGED
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.6.6
-Summary:
+Version: 2.7.8
+Summary: Code improvements
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classifcation,potentiation learning artficial neural networks
pyerualjetwork-2.7.8.dist-info/RECORD
ADDED
@@ -0,0 +1,6 @@
+plan/__init__.py,sha256=gmaz8lnQfl18MbOQwabBUPmShajK5S99jfyY-hQe8tc,502
+plan/plan.py,sha256=h1u6hk_wTjbCGkKhnAz6jidxTogmMTM2AEm7nxE9WQQ,52805
+pyerualjetwork-2.7.8.dist-info/METADATA,sha256=921fYXjv_44__3jjG8aZWt_iHE3I6rbNdHEar_vIDOw,244
+pyerualjetwork-2.7.8.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+pyerualjetwork-2.7.8.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
+pyerualjetwork-2.7.8.dist-info/RECORD,,
pyerualjetwork-2.6.6.dist-info/RECORD
REMOVED
@@ -1,6 +0,0 @@
-plan/__init__.py,sha256=gmaz8lnQfl18MbOQwabBUPmShajK5S99jfyY-hQe8tc,502
-plan/plan.py,sha256=iICeYTx7-I4HRFqUFOsfuG1Nlp1vcYIWRiw3D960gGI,53301
-pyerualjetwork-2.6.6.dist-info/METADATA,sha256=cfxWuZznzuP91MhQ3Vrro8INQF7l0LW_5YaSAw0GyEM,338
-pyerualjetwork-2.6.6.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-2.6.6.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
-pyerualjetwork-2.6.6.dist-info/RECORD,,
{pyerualjetwork-2.6.6.dist-info → pyerualjetwork-2.7.8.dist-info}/top_level.txt
File without changes