pyerualjetwork 2.2.8__tar.gz → 2.3.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork-2.3.8/PKG-INFO +7 -0
- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/plan_bi/__init__.py +1 -1
- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/plan_bi/plan_bi.py +187 -26
- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/plan_di/__init__.py +1 -1
- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/plan_di/plan_di.py +216 -34
- pyerualjetwork-2.3.8/pyerualjetwork.egg-info/PKG-INFO +7 -0
- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/setup.py +2 -2
- pyerualjetwork-2.2.8/PKG-INFO +0 -7
- pyerualjetwork-2.2.8/pyerualjetwork.egg-info/PKG-INFO +0 -7
- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/pyerualjetwork.egg-info/SOURCES.txt +0 -0
- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/pyerualjetwork.egg-info/top_level.txt +0 -0
- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/setup.cfg +0 -0
pyerualjetwork-2.3.8/PKG-INFO (new file)

```diff
@@ -0,0 +1,7 @@
+Metadata-Version: 2.1
+Name: pyerualjetwork
+Version: 2.3.8
+Summary: Weights post process function added: [weight_post_process](optional after training before testing.), new function: manuel_balancer. And scaler_params added for scaled models.
+Author: Hasan Can Beydili
+Author-email: tchasancan@gmail.com
+Keywords: model evaluation,classifcation,pruning learning artficial neural networks
```
{pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/plan_bi/__init__.py

```diff
@@ -2,4 +2,4 @@
 
 # Bu dosya, plan modülünün ana giriş noktasıdır.
 
-from .plan_bi import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, get_pot, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate
+from .plan_bi import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, get_pot, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer, weight_normalization
```
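The two functions appended to this import line are the headline additions of 2.3.8. A minimal sketch of picking them up from an installed copy of the package:

```python
# Minimal sketch: the two helpers added to the public surface in 2.3.8.
# Assumes the package is installed, e.g.  pip install pyerualjetwork==2.3.8
from plan_bi import manuel_balancer, weight_normalization

help(manuel_balancer)        # class re-balancing via synthetic samples
help(weight_normalization)   # per-neuron row normalization of trained weights
```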
{pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/plan_bi/plan_bi.py

```diff
@@ -17,6 +17,7 @@ def fit(
     x_train: List[Union[int, float]],
     y_train: List[Union[int, float, str]], # At least two.. and one hot encoded
     activation_potential: Union[float],
+    show_training
 ) -> str:
 
     infoPLAN = """
```
```diff
@@ -26,6 +27,7 @@ def fit(
         x_train (list[num]): List of input data.
         y_train (list[num]): List of y_train. (one hot encoded)
         activation_potential (float): Input activation potential
+        show_training (bool, str): True, None or 'final'
 
     Returns:
         list([num]): (Weight matrices list, train_predictions list, Train_acc).
```
```diff
@@ -85,6 +87,34 @@ def fit(
 
         for i, w in enumerate(W):
             trained_W[i] = trained_W[i] + w
+
+        if show_training == True:
+
+            fig, ax = plt.subplots(1, 10, figsize=(18, 14))
+
+            try:
+                row = x_train[1].shape[0]
+                col = x_train[1].shape[1]
+            except:
+                print(Fore.RED + 'ERROR: You try train showing but inputs is raveled. x_train inputs to must be reshape for training_show.', infoPLAN + Style.RESET_ALL)
+                return 'e'
+
+            for j in range(10):
+
+                mat = trained_W[0][j,:].reshape(row, col)
+
+                ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+                ax[j].set_aspect('equal')
+
+                ax[j].set_xticks([])
+                ax[j].set_yticks([])
+                ax[j].set_title(f'{j+1}. Neuron')
+
+            plt.show()
 
 
         W = weight_identification(len(layers) - 1, len(class_count), neurons, x_train_size)
```
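The new `show_training` block renders the first ten rows of the first weight matrix as images after each training sample, so the samples must keep their 2-D shape (the error branch above fires on raveled inputs). A hedged call sketch; the random data and `activation_potential` value are illustrative, and I am assuming `fit` flattens each sample internally, as the plan_di version shown later does:

```python
import numpy as np
from plan_bi import fit

# Illustrative data: 100 random 28x28 "images", 10 one-hot classes.
x_train = [np.random.rand(28, 28) for _ in range(100)]
y_train = np.eye(10)[np.random.randint(0, 10, size=100)]

# show_training=True redraws the first ten neurons' weight images after
# every sample; pass None to train without plotting.
trained_W = fit(x_train, y_train, 0.5, show_training=True)
```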
```diff
@@ -127,6 +157,27 @@ def fit(
     trained_W.append(np.eye(len(class_count)))
 
     return trained_W
+
+def weight_normalization(
+    W,
+    class_count
+) -> str:
+    """
+    Row(Neuron) based normalization. For unbalanced models.
+
+    Args:
+        W (list(num)): Trained weight matrix list.
+        class_count (int): Class count of model.
+
+    Returns:
+        list([numpy_arrays],[...]): posttrained weight matices of the model. .
+    """
+
+    for i in range(class_count):
+
+        W[0][i,:] = normalization(W[0][i,:])
+
+    return W
 
 # FUNCTIONS -----
```
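`weight_normalization` is the optional post-training, pre-testing step the release summary calls `weight_post_process`: it re-normalizes each class row of the first weight matrix, which the docstring recommends for unbalanced models. A minimal sketch, assuming `trained_W` came from `fit` on a 10-class model:

```python
from plan_bi import weight_normalization

# trained_W is the weight list returned by fit(); only W[0] is touched,
# one row (neuron) per class.
trained_W = weight_normalization(trained_W, class_count=10)
```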
```diff
@@ -528,7 +579,7 @@ def save_model(model_name,
                weights_type,
                weights_format,
                model_path,
-
+               scaler_params,
                W
                ):
 
```
```diff
@@ -544,7 +595,7 @@ def save_model(model_name,
         weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
         WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
         model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
-
+        scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
         W: Weights list of the model.
 
     Returns:
```
```diff
@@ -592,7 +643,7 @@ def save_model(model_name,
             'WEIGHTS TYPE': weights_type,
             'WEIGHTS FORMAT': weights_format,
             'MODEL PATH': model_path,
-            'STANDARD SCALER':
+            'STANDARD SCALER': scaler_params
             }
     try:
 
```
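`save_model` now persists the scaler parameters in the model's info dataframe, so prediction-time code can re-apply the same mean/std. A hedged call sketch; only the `weights_type`, `weights_format`, `model_path`, `scaler_params`, and `W` parameters are visible in this diff, and the leading arguments below are assumptions about the rest of the signature, not confirmed by it:

```python
from plan_bi import standard_scaler, save_model

# Fit the scaler on training data; scaler_params is [mean, std].
scaler_params, x_train_scaled, x_test_scaled = standard_scaler(x_train, x_test)

save_model(model_name='my_plan_model',      # assumed leading parameters
           model_type='PLAN',
           class_count=10,
           activation_potential=0.5,
           test_acc=0.95,
           weights_type='npy',
           weights_format='raw',
           model_path='models/',
           scaler_params=scaler_params,     # [mean, std], or None if unscaled
           W=trained_W)
```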
```diff
@@ -742,11 +793,11 @@ def predict_model_ssd(Input, model_name, model_path):
     """
     W, activation_potential, df = load_model(model_name,model_path)
 
-
+    scaler_params = str(df['STANDARD SCALER'].iloc[0])
 
-    if
+    if scaler_params != None:
 
-        Input = standard_scaler(Input,
+        Input = standard_scaler(None, Input, scaler_params)
 
     layers = ['fex','cat']
 
```
```diff
@@ -773,7 +824,7 @@ def predict_model_ssd(Input, model_name, model_path):
     return neural_layer
 
 
-def predict_model_ram(Input, activation_potential,
+def predict_model_ram(Input, activation_potential, scaler_params, W):
 
     infopredict_model_ram = """
     Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
```
```diff
@@ -782,15 +833,15 @@ def predict_model_ram(Input, activation_potential, scaler, W):
     Arguments:
         Input (list or ndarray): Input data for the model (single vector or single matrix).
         activation_potential (float): Activation potential.
-
+        scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
         W (list of ndarrays): Weights of the model.
 
     Returns:
         ndarray: Output from the model.
     """
-    if
+    if scaler_params != None:
 
-        Input = standard_scaler(Input,
+        Input = standard_scaler(None, Input, scaler_params)
 
     layers = ['fex','cat']
 
```
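With the renamed parameter, `predict_model_ram` applies the stored scaler itself, so callers pass raw (unscaled) input. A minimal sketch; `trained_W` and the `activation_potential` value are assumed to come from an earlier `fit` run:

```python
import numpy as np
from plan_bi import predict_model_ram

# scaler_params is the [mean, std] list from standard_scaler, or None if
# the model was trained on unscaled data; scaling happens inside the call.
sample = np.random.rand(784)
output = predict_model_ram(sample, activation_potential=0.5,
                           scaler_params=None, W=trained_W)
print('predicted class:', np.argmax(output))
```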
```diff
@@ -917,7 +968,7 @@ def synthetic_augmentation(x_train, y_train):
     return np.array(x_balanced), np.array(y_balanced)
 
 
-def standard_scaler(x_train, x_test):
+def standard_scaler(x_train, x_test, scaler_params=None):
     info_standard_scaler = """
     Standardizes training and test datasets. x_test may be None.
 
```
```diff
@@ -928,29 +979,42 @@ def standard_scaler(x_train, x_test):
             Test data
 
     Returns:
+        list:
+            Scaler parameters: mean and std
         tuple
             Standardized training and test datasets
     """
     try:
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+        if scaler_params == None and x_test != None:
+
+            mean = np.mean(x_train, axis=0)
+            std = np.std(x_train, axis=0)
+            train_data_scaled = (x_train - mean) / std
+            test_data_scaled = (x_test - mean) / std
+
+            scaler_params = [mean, std]
+
+            return scaler_params, train_data_scaled, test_data_scaled
+
+        if scaler_params == None and x_test == None:
+
+            mean = np.mean(x_train, axis=0)
+            std = np.std(x_train, axis=0)
+            train_data_scaled = (x_train - mean) / std
+
+            scaler_params = [mean, std]
+
+            return scaler_params, train_data_scaled
+
+        if scaler_params != None:
+            test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
+            return test_data_scaled
 
     except:
         print(
             Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
 
-
 def encode_one_hot(y_train, y_test):
     info_one_hot_encode = """
     Performs one-hot encoding on y_train and y_test data..
```
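The rewritten `standard_scaler` has three call patterns, selected by which arguments are None: fit-and-transform train plus test, fit-and-transform train only, or re-apply stored parameters to new data. A sketch of all three modes with toy values:

```python
from plan_bi import standard_scaler

# Plain lists on purpose: the mode checks use `== None` / `!= None`,
# and on an ndarray those comparisons are elementwise, which would raise
# inside the `if` and drop the call into the except branch.
x_train = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]
x_test = [[2.0, 2.0], [4.0, 4.0]]

# Mode 1: fit mean/std on x_train and scale both sets.
scaler_params, x_train_s, x_test_s = standard_scaler(x_train, x_test)

# Mode 2: no test set yet; fit and scale the training data only.
scaler_params, x_train_s = standard_scaler(x_train, None)

# Mode 3: reuse stored [mean, std] on new data (first argument is unused).
x_new_s = standard_scaler(None, x_test, scaler_params)
```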
```diff
@@ -1304,7 +1368,7 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[0, 1].set_ylim(0, 1)  # Y eksenini 0 ile 1 arasında sınırla
     axs[0, 1].set_xlabel('Metrics')
     axs[0, 1].set_ylabel('Score')
-    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (
+    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Weighted)')
     axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
 
     # Accuracy
```
```diff
@@ -1324,6 +1388,103 @@ def plot_evaluate(y_test, y_preds, acc_list):
     plt.tight_layout()
     plt.show()
 
+
+def manuel_balancer(x_train, y_train, target_samples_per_class):
+    """
+    Generates synthetic examples to balance classes to the specified number of examples per class.
+
+    Arguments:
+    x_train -- Input dataset (examples) - NumPy array format
+    y_train -- Class labels (one-hot encoded) - NumPy array format
+    target_samples_per_class -- Desired number of samples per class
+
+    Returns:
+    x_balanced -- Balanced input dataset (NumPy array format)
+    y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
+    """
+    start_time = time.time()
+    x_train = np.array(x_train)
+    y_train = np.array(y_train)
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    x_balanced = []
+    y_balanced = []
+
+    for class_label in range(class_count):
+        class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
+        num_samples = len(class_indices)
+
+        if num_samples > target_samples_per_class:
+
+            selected_indices = np.random.choice(class_indices, target_samples_per_class, replace=False)
+            x_balanced.append(x_train[selected_indices])
+            y_balanced.append(y_train[selected_indices])
+
+        else:
+
+            x_balanced.append(x_train[class_indices])
+            y_balanced.append(y_train[class_indices])
+
+            if num_samples < target_samples_per_class:
+
+                samples_to_add = target_samples_per_class - num_samples
+                additional_samples = np.zeros((samples_to_add, x_train.shape[1]))
+                additional_labels = np.zeros((samples_to_add, y_train.shape[1]))
+
+                for i in range(samples_to_add):
+                    uni_start_time = time.time()
+
+                    random_indices = np.random.choice(class_indices, 2, replace=False)
+                    sample1 = x_train[random_indices[0]]
+                    sample2 = x_train[random_indices[1]]
+
+                    synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
+
+                    additional_samples[i] = synthetic_sample
+                    additional_labels[i] = y_train[class_indices[0]]
+
+                    uni_end_time = time.time()
+                    calculating_est = round(
+                        (uni_end_time - uni_start_time) * (samples_to_add - i), 3)
+
+                    if calculating_est < 60:
+                        print('\rest......(sec):', calculating_est, '\n', end="")
+
+                    elif calculating_est > 60 and calculating_est < 3600:
+                        print('\rest......(min):', calculating_est/60, '\n', end="")
+
+                    elif calculating_est > 3600:
+                        print('\rest......(h):', calculating_est/3600, '\n', end="")
+
+                    print('Augmenting: ', class_label, '/', class_count, '...', i, "/", samples_to_add, "\n", end="")
+
+                x_balanced.append(additional_samples)
+                y_balanced.append(additional_labels)
+
+    EndTime = time.time()
+
+    calculating_est = round(EndTime - start_time, 2)
+
+    print(Fore.GREEN + " \nBalancing Finished with 0 ERROR\n" + Style.RESET_ALL)
+
+    if calculating_est < 60:
+        print('Total balancing time(sec): ', calculating_est)
+
+    elif calculating_est > 60 and calculating_est < 3600:
+        print('Total balancing time(min): ', calculating_est/60)
+
+    elif calculating_est > 3600:
+        print('Total balancing time(h): ', calculating_est/3600)
+
+    # Stack the balanced arrays
+    x_balanced = np.vstack(x_balanced)
+    y_balanced = np.vstack(y_balanced)
+
+    return x_balanced, y_balanced
+
 def get_weights():
 
     return 0
```
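`manuel_balancer` downsamples majority classes by random selection and upsamples minority classes by linear interpolation between two random same-class samples, i.e. `x_new = x1 + (x2 - x1) * r` with `r` uniform in [0, 1). A short usage sketch on toy data:

```python
import numpy as np
from plan_bi import manuel_balancer

# Toy imbalanced set: 80 samples of class 0, 20 of class 1.
x = np.vstack([np.random.rand(80, 4), np.random.rand(20, 4) + 1.0])
y = np.vstack([np.tile([1, 0], (80, 1)), np.tile([0, 1], (20, 1))])

# Bring every class to exactly 50 samples: class 0 is subsampled,
# class 1 is topped up with 30 interpolated synthetic points.
x_bal, y_bal = manuel_balancer(x, y, target_samples_per_class=50)
print(x_bal.shape, y_bal.shape)  # (100, 4) (100, 2)
```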
{pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/plan_di/__init__.py

```diff
@@ -2,4 +2,4 @@
 
 # Bu dosya, plan modülünün ana giriş noktasıdır.
 
-from .plan_di import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate
+from .plan_di import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer, weight_normalization
```
{pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/plan_di/plan_di.py

```diff
@@ -21,7 +21,8 @@ import seaborn as sns
 def fit(
     x_train: List[Union[int, float]],
     # At least two.. and one hot encoded
-    y_train: List[Union[int, float, str]],
+    y_train: List[Union[int, float, str]], # At least two.. and one hot encoded
+    show_training
 ) -> str:
 
     infoPLAN = """
```
```diff
@@ -30,7 +31,7 @@ def fit(
     Args:
         x_train (list[num]): List of input data.
         y_train (list[num]): List of y_train. (one hot encoded)
-
+        show_training (bool, str): True, None or 'final'
 
     Returns:
         list([num]): (Weight matrices list, train_predictions list, Train_acc).
```
```diff
@@ -57,13 +58,14 @@ def fit(
     x_train[0] = x_train[0].ravel()
     x_train_size = len(x_train[0])
 
-    W = weight_identification(
+    W = pdi.weight_identification(
         len(layers) - 1, len(class_count), neurons, x_train_size)
 
     trained_W = [1] * len(W)
     print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
     start_time = time.time()
-    y = decode_one_hot(y_train)
+    y = pdi.decode_one_hot(y_train)
+
     for index, inp in enumerate(x_train):
         uni_start_time = time.time()
         inp = np.array(inp)
```
```diff
@@ -78,15 +80,40 @@ def fit(
 
         for Lindex, Layer in enumerate(layers):
 
-            neural_layer = normalization(neural_layer)
+            neural_layer = pdi.normalization(neural_layer)
 
             if Layer == 'fex':
-                W[Lindex] = fex(neural_layer, W[Lindex], True, y[index])
+                W[Lindex] = pdi.fex(neural_layer, W[Lindex], True, y[index])
 
         for i, w in enumerate(W):
             trained_W[i] = trained_W[i] + w
 
-
+        if show_training == True:
+
+            fig, ax = plt.subplots(1, 10, figsize=(18, 14))
+
+            try:
+                row = x_train[1].shape[0]
+                col = x_train[1].shape[1]
+            except:
+                print(Fore.RED + 'ERROR: You try train showing but inputs is raveled. x_train inputs to must be reshape for training_show.', infoPLAN + Style.RESET_ALL)
+                return 'e'
+
+            for j in range(10):
+
+                mat = trained_W[0][j,:].reshape(row, col)
+
+                ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+                ax[j].set_aspect('equal')
+
+                ax[j].set_xticks([])
+                ax[j].set_yticks([])
+                ax[j].set_title(f'{j+1}. Neuron')
+
+            plt.show()
+
+        W = pdi.weight_identification(
             len(layers) - 1, len(class_count), neurons, x_train_size)
 
         uni_end_time = time.time()
```
```diff
@@ -105,6 +132,31 @@ def fit(
 
         print('\rTraining: ', index, "/", len(x_train), "\n", end="")
 
+    if show_training == 'final':
+
+        fig, ax = plt.subplots(1, 10, figsize=(18, 14))
+
+        try:
+            row = x_train[1].shape[0]
+            col = x_train[1].shape[1]
+        except:
+            print(Fore.RED + 'ERROR: You try train showing but inputs is raveled. x_train inputs to must be reshape for training_show.', infoPLAN + Style.RESET_ALL)
+            return 'e'
+
+        for j in range(10):
+
+            mat = trained_W[0][j,:].reshape(row, col)
+
+            ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+            ax[j].set_aspect('equal')
+
+            ax[j].set_xticks([])
+            ax[j].set_yticks([])
+            ax[j].set_title(f'{j+1}. Neuron')
+
+        plt.show()
+
     EndTime = time.time()
 
     calculating_est = round(EndTime - start_time, 2)
```
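In plan_di, `show_training` also accepts the string `'final'`, which skips the per-sample redraws and renders the ten neuron weight images once, after the last training sample. A hedged sketch with illustrative random data; note the plan_di `fit` takes no `activation_potential` argument:

```python
import numpy as np
from plan_di import fit

x_train = [np.random.rand(28, 28) for _ in range(100)]
y_train = np.eye(10)[np.random.randint(0, 10, size=100)]

# 'final' plots the first weight matrix once at the end of training;
# True would redraw it after every sample, None disables plotting.
trained_W = fit(x_train, y_train, show_training='final')
```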
```diff
@@ -122,11 +174,32 @@ def fit(
 
     layers.append('cat')
     trained_W.append(np.eye(len(class_count)))
-
+
+
     return trained_W
 
 # FUNCTIONS -----
 
+def weight_normalization(
+    W,
+    class_count
+) -> str:
+    """
+    Row(Neuron) based normalization. For unbalanced models.
+
+    Args:
+        W (list(num)): Trained weight matrix list.
+        class_count (int): Class count of model.
+
+    Returns:
+        list([numpy_arrays],[...]): posttrained weight matices of the model. .
+    """
+
+    for i in range(class_count):
+
+        W[0][i,:] = normalization(W[0][i,:])
+
+    return W
 
 def weight_identification(
     layer_count, # int: Number of layers in the neural network.
```
```diff
@@ -508,7 +581,7 @@ def save_model(model_name,
                weights_type,
                weights_format,
                model_path,
-
+               scaler_params,
                W
                ):
 
```
```diff
@@ -523,7 +596,7 @@ def save_model(model_name,
         weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
         WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
         model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
-
+        scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
         W: Weights of the model.
 
     Returns:
```
```diff
@@ -573,7 +646,7 @@ def save_model(model_name,
             'WEIGHTS TYPE': weights_type,
             'WEIGHTS FORMAT': weights_format,
             'MODEL PATH': model_path,
-            'STANDARD SCALER':
+            'STANDARD SCALER': scaler_params
             }
     try:
 
```
```diff
@@ -720,11 +793,11 @@ def predict_model_ssd(Input, model_name, model_path):
     """
     W, df = load_model(model_name, model_path)
 
-
+    scaler_params = str(df['STANDARD SCALER'].iloc[0])
 
-    if
+    if scaler_params != None:
 
-        Input = standard_scaler(Input,
+        Input = standard_scaler(None, Input, scaler_params)
 
     layers = ['fex', 'cat']
 
```
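`predict_model_ssd` now reads the stored scaler parameters back out of the saved model's dataframe and applies them before the forward pass, so raw input is passed in. One caveat worth flagging: as written, the `str(...)` cast turns the restored value into a string, so the `!= None` check is always true (a model saved without scaling round-trips as the string `'None'`); callers who kept the in-memory `[mean, std]` list may prefer `predict_model_ram`. A minimal call sketch with placeholder name and path:

```python
import numpy as np
from plan_di import predict_model_ssd

# The model's saved scaler parameters are applied internally.
sample = np.random.rand(784)
output = predict_model_ssd(sample, model_name='my_plan_model',
                           model_path='models/')
print('predicted class:', np.argmax(output))
```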
```diff
@@ -752,7 +825,7 @@ def predict_model_ssd(Input, model_name, model_path):
     return neural_layer
 
 
-def predict_model_ram(Input,
+def predict_model_ram(Input, scaler_params, W):
 
     infopredict_model_ram = """
     Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
```
```diff
@@ -760,16 +833,16 @@ def predict_model_ram(Input, scaler, W):
 
     Arguments:
         Input (list or ndarray): Input data for the model (single vector or single matrix).
-
+        scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
         W (list of ndarrays): Weights of the model.
 
     Returns:
         ndarray: Output from the model.
     """
 
-    if
+    if scaler_params != None:
 
-        Input = standard_scaler(Input,
+        Input = standard_scaler(None, Input, scaler_params)
 
     layers = ['fex', 'cat']
 
```
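The plan_di variant of `predict_model_ram` mirrors the plan_bi change but, as before, takes no `activation_potential` argument. A minimal sketch, with `trained_W` and `scaler_params` assumed to come from earlier `fit` / `standard_scaler` calls:

```python
import numpy as np
from plan_di import predict_model_ram

# Pass the in-memory [mean, std] list (or None); scaling happens inside.
sample = np.random.rand(784)
output = predict_model_ram(sample, scaler_params=scaler_params, W=trained_W)
print('predicted class:', np.argmax(output))
```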
```diff
@@ -896,7 +969,7 @@ def synthetic_augmentation(x_train, y_train):
     return np.array(x_balanced), np.array(y_balanced)
 
 
-def standard_scaler(x_train, x_test):
+def standard_scaler(x_train, x_test, scaler_params=None):
     info_standard_scaler = """
     Standardizes training and test datasets. x_test may be None.
 
```
The body change is identical to the plan_bi version of standard_scaler above:

```diff
@@ -907,29 +980,42 @@ def standard_scaler(x_train, x_test):
             Test data
 
     Returns:
+        list:
+            Scaler parameters: mean and std
         tuple
             Standardized training and test datasets
     """
     try:
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+        if scaler_params == None and x_test != None:
+
+            mean = np.mean(x_train, axis=0)
+            std = np.std(x_train, axis=0)
+            train_data_scaled = (x_train - mean) / std
+            test_data_scaled = (x_test - mean) / std
+
+            scaler_params = [mean, std]
+
+            return scaler_params, train_data_scaled, test_data_scaled
+
+        if scaler_params == None and x_test == None:
+
+            mean = np.mean(x_train, axis=0)
+            std = np.std(x_train, axis=0)
+            train_data_scaled = (x_train - mean) / std
+
+            scaler_params = [mean, std]
+
+            return scaler_params, train_data_scaled
+
+        if scaler_params != None:
+            test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
+            return test_data_scaled
 
     except:
         print(
             Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
 
-
 def encode_one_hot(y_train, y_test):
     info_one_hot_encode = """
     Performs one-hot encoding on y_train and y_test data..
```
```diff
@@ -1276,7 +1362,7 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[0, 1].set_ylim(0, 1)  # Y eksenini 0 ile 1 arasında sınırla
     axs[0, 1].set_xlabel('Metrics')
     axs[0, 1].set_ylabel('Score')
-    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (
+    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Weighted)')
     axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
 
     # Accuracy
```
plan_di gains the same manuel_balancer function as plan_bi:

```diff
@@ -1296,6 +1382,102 @@ def plot_evaluate(y_test, y_preds, acc_list):
     plt.tight_layout()
     plt.show()
 
+def manuel_balancer(x_train, y_train, target_samples_per_class):
+    """
+    Generates synthetic examples to balance classes to the specified number of examples per class.
+
+    Arguments:
+    x_train -- Input dataset (examples) - NumPy array format
+    y_train -- Class labels (one-hot encoded) - NumPy array format
+    target_samples_per_class -- Desired number of samples per class
+
+    Returns:
+    x_balanced -- Balanced input dataset (NumPy array format)
+    y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
+    """
+    start_time = time.time()
+    x_train = np.array(x_train)
+    y_train = np.array(y_train)
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    x_balanced = []
+    y_balanced = []
+
+    for class_label in range(class_count):
+        class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
+        num_samples = len(class_indices)
+
+        if num_samples > target_samples_per_class:
+
+            selected_indices = np.random.choice(class_indices, target_samples_per_class, replace=False)
+            x_balanced.append(x_train[selected_indices])
+            y_balanced.append(y_train[selected_indices])
+
+        else:
+
+            x_balanced.append(x_train[class_indices])
+            y_balanced.append(y_train[class_indices])
+
+            if num_samples < target_samples_per_class:
+
+                samples_to_add = target_samples_per_class - num_samples
+                additional_samples = np.zeros((samples_to_add, x_train.shape[1]))
+                additional_labels = np.zeros((samples_to_add, y_train.shape[1]))
+
+                for i in range(samples_to_add):
+                    uni_start_time = time.time()
+
+                    random_indices = np.random.choice(class_indices, 2, replace=False)
+                    sample1 = x_train[random_indices[0]]
+                    sample2 = x_train[random_indices[1]]
+
+                    synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
+
+                    additional_samples[i] = synthetic_sample
+                    additional_labels[i] = y_train[class_indices[0]]
+
+                    uni_end_time = time.time()
+                    calculating_est = round(
+                        (uni_end_time - uni_start_time) * (samples_to_add - i), 3)
+
+                    if calculating_est < 60:
+                        print('\rest......(sec):', calculating_est, '\n', end="")
+
+                    elif calculating_est > 60 and calculating_est < 3600:
+                        print('\rest......(min):', calculating_est/60, '\n', end="")
+
+                    elif calculating_est > 3600:
+                        print('\rest......(h):', calculating_est/3600, '\n', end="")
+
+                    print('Augmenting: ', class_label, '/', class_count, '...', i, "/", samples_to_add, "\n", end="")
+
+                x_balanced.append(additional_samples)
+                y_balanced.append(additional_labels)
+
+    EndTime = time.time()
+
+    calculating_est = round(EndTime - start_time, 2)
+
+    print(Fore.GREEN + " \nBalancing Finished with 0 ERROR\n" + Style.RESET_ALL)
+
+    if calculating_est < 60:
+        print('Total balancing time(sec): ', calculating_est)
+
+    elif calculating_est > 60 and calculating_est < 3600:
+        print('Total balancing time(min): ', calculating_est/60)
+
+    elif calculating_est > 3600:
+        print('Total balancing time(h): ', calculating_est/3600)
+
+    # Stack the balanced arrays
+    x_balanced = np.vstack(x_balanced)
+    y_balanced = np.vstack(y_balanced)
+
+    return x_balanced, y_balanced
+
 def get_weights():
 
     return 0
```
pyerualjetwork-2.3.8/pyerualjetwork.egg-info/PKG-INFO (new file)

```diff
@@ -0,0 +1,7 @@
+Metadata-Version: 2.1
+Name: pyerualjetwork
+Version: 2.3.8
+Summary: Weights post process function added: [weight_post_process](optional after training before testing.), new function: manuel_balancer. And scaler_params added for scaled models.
+Author: Hasan Can Beydili
+Author-email: tchasancan@gmail.com
+Keywords: model evaluation,classifcation,pruning learning artficial neural networks
```
{pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/setup.py

```diff
@@ -5,10 +5,10 @@ from setuptools import setup, find_packages
 setup(
 
     name = "pyerualjetwork",
-    version = "2.
+    version = "2.3.8",
     author = "Hasan Can Beydili",
     author_email = "tchasancan@gmail.com",
-    description= "
+    description= "Weights post process function added: [weight_post_process](optional after training before testing.), new function: manuel_balancer. And scaler_params added for scaled models.",
     packages = find_packages(),
     keywords = ["model evaluation", "classifcation", 'pruning learning artficial neural networks'],
 
```
pyerualjetwork-2.2.8/PKG-INFO (DELETED)

```diff
@@ -1,7 +0,0 @@
-Metadata-Version: 2.1
-Name: pyerualjetwork
-Version: 2.2.8
-Summary: Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
-Author: Hasan Can Beydili
-Author-email: tchasancan@gmail.com
-Keywords: model evaluation,classifcation,pruning learning artficial neural networks
```
pyerualjetwork-2.2.8/pyerualjetwork.egg-info/PKG-INFO (DELETED)

```diff
@@ -1,7 +0,0 @@
-Metadata-Version: 2.1
-Name: pyerualjetwork
-Version: 2.2.8
-Summary: Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
-Author: Hasan Can Beydili
-Author-email: tchasancan@gmail.com
-Keywords: model evaluation,classifcation,pruning learning artficial neural networks
```
Files without changes:

- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/pyerualjetwork.egg-info/SOURCES.txt
- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/pyerualjetwork.egg-info/dependency_links.txt
- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/pyerualjetwork.egg-info/top_level.txt
- {pyerualjetwork-2.2.8 → pyerualjetwork-2.3.8}/setup.cfg