pyerualjetwork 2.2.8__py3-none-any.whl → 2.2.9__py3-none-any.whl
This diff covers publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- plan_bi/__init__.py +1 -1
- plan_bi/plan_bi.py +135 -22
- plan_di/__init__.py +1 -1
- plan_di/plan_di.py +135 -22
- {pyerualjetwork-2.2.8.dist-info → pyerualjetwork-2.2.9.dist-info}/METADATA +2 -2
- pyerualjetwork-2.2.9.dist-info/RECORD +8 -0
- pyerualjetwork-2.2.8.dist-info/RECORD +0 -8
- {pyerualjetwork-2.2.8.dist-info → pyerualjetwork-2.2.9.dist-info}/WHEEL +0 -0
- {pyerualjetwork-2.2.8.dist-info → pyerualjetwork-2.2.9.dist-info}/top_level.txt +0 -0
plan_bi/__init__.py  (CHANGED)

@@ -2,4 +2,4 @@
 
 # Bu dosya, plan modülünün ana giriş noktasıdır.
 
-from .plan_bi import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, get_pot, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate
+from .plan_bi import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, get_pot, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer
plan_bi/plan_bi.py  (CHANGED)

@@ -126,6 +126,14 @@ def fit(
     layers.append('cat')
     trained_W.append(np.eye(len(class_count)))
 
+    for i in range(len(class_count)):
+
+        for j in range(len(layers)):
+
+            if layers[j] == 'fex':
+
+                trained_W[j][i,:] = normalization(trained_W[j][i,:])
+
     return trained_W
 
 # FUNCTIONS -----
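The hunk above is the "weights now normalized" change from the 2.2.9 release notes: after training, each class row of every 'fex' layer's weight matrix is normalized in place. The library's own normalization() is not part of this diff, so the sketch below uses an abs-max stand-in purely for illustration:

```python
import numpy as np

def normalization(vec):
    # Stand-in only: normalization() is not shown in this diff, so abs-max
    # scaling is assumed here for illustration.
    return vec / np.max(np.abs(vec))

# Mimic the step added to fit(): normalize each class row of every 'fex'
# layer's weight matrix.
layers = ['fex', 'cat']
class_count = [0, 1, 2]                        # placeholder class list
trained_W = [np.random.rand(3, 5), np.eye(3)]  # [fex weights, cat identity]

for i in range(len(class_count)):
    for j in range(len(layers)):
        if layers[j] == 'fex':
            trained_W[j][i, :] = normalization(trained_W[j][i, :])
```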
@@ -528,7 +536,7 @@ def save_model(model_name,
                weights_type,
                weights_format,
                model_path,
-               scaler,
+               scaler_params,
                W
                ):
 
@@ -544,7 +552,7 @@ def save_model(model_name,
     weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
     WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
     model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
-
+    scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
     W: Weights list of the model.
 
     Returns:
@@ -592,7 +600,7 @@ def save_model(model_name,
             'WEIGHTS TYPE': weights_type,
             'WEIGHTS FORMAT': weights_format,
             'MODEL PATH': model_path,
-            'STANDARD SCALER':
+            'STANDARD SCALER': scaler_params
             }
         try:
 
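save_model now takes a scaler_params argument between model_path and W, and persists it under the 'STANDARD SCALER' key. A schematic call is sketched below; save_model has further parameters that this diff does not show, and every value here is a placeholder:

```python
# Schematic only: parameters not visible in these hunks are elided.
save_model(model_name='my_plan',
           # ...parameters not shown in this diff...
           weights_type='npy',
           weights_format='raw',
           model_path='',
           scaler_params=scaler_params,  # [mean, std] list, or None if inputs were never scaled
           W=trained_W)
```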
@@ -742,11 +750,11 @@ def predict_model_ssd(Input, model_name, model_path):
     """
     W, activation_potential, df = load_model(model_name,model_path)
 
-
+    scaler_params = str(df['STANDARD SCALER'].iloc[0])
 
-    if
+    if scaler_params != None:
 
-        Input = standard_scaler(Input, None)
+        Input = standard_scaler(scaler_params, Input, None)
 
     layers = ['fex','cat']
 
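One detail worth noting in this hunk: the stored scaler parameters are read back through str(...), so scaler_params is always a string and the `scaler_params != None` check is always true; a model saved without scaling yields the string 'None' here. A minimal loading sketch, assuming a model named 'my_plan' was previously saved under model_path:

```python
from plan_bi import predict_model_ssd
import numpy as np

sample = np.random.rand(4)  # placeholder input vector

output = predict_model_ssd(Input=sample, model_name='my_plan', model_path='')
```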
@@ -773,7 +781,7 @@ def predict_model_ssd(Input, model_name, model_path):
     return neural_layer
 
 
-def predict_model_ram(Input, activation_potential, scaler, W):
+def predict_model_ram(Input, activation_potential, scaler_params, W):
 
     infopredict_model_ram = """
     Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
@@ -782,15 +790,15 @@ def predict_model_ram(Input, activation_potential, scaler, W):
     Arguments:
     Input (list or ndarray): Input data for the model (single vector or single matrix).
     activation_potential (float): Activation potential.
-
+    scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
     W (list of ndarrays): Weights of the model.
 
     Returns:
     ndarray: Output from the model.
     """
-    if
+    if scaler_params != None:
 
-        Input = standard_scaler(Input, None)
+        Input = standard_scaler(scaler_params, Input, None)
 
     layers = ['fex','cat']
 
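The in-memory variant renames its scaler argument to scaler_params and forwards it to standard_scaler. A sketch of the 2.2.9 plan_bi call; the activation potential value is a placeholder and trained_W stands for the weights list produced by fit():

```python
from plan_bi import predict_model_ram
import numpy as np

sample = np.random.rand(4)  # placeholder input
# trained_W: the weights list returned by fit() (not constructed in this sketch)

# scaler_params=None means the input is used as-is; pass the [mean, std]
# list returned by standard_scaler if the training data was scaled.
output = predict_model_ram(sample, 0.5, None, trained_W)  # 0.5: placeholder activation_potential
```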
@@ -917,7 +925,7 @@ def synthetic_augmentation(x_train, y_train):
     return np.array(x_balanced), np.array(y_balanced)
 
 
-def standard_scaler(x_train, x_test):
+def standard_scaler(scaler_params=None, x_train, x_test):
     info_standard_scaler = """
     Standardizes training and test datasets. x_test may be None.
 
@@ -928,23 +936,31 @@ def standard_scaler(x_train, x_test):
         Test data
 
     Returns:
+    list:
+        Scaler parameters: mean and std
     tuple
         Standardized training and test datasets
     """
-
-
-
-
+    try:
+
+        if scaler_params == None:
+            mean = np.mean(x_train, axis=0)
+            std = np.std(x_train, axis=0)
+            scaler_params = [mean, std]
 
-
+        if x_test == None:
 
-
-
+            train_data_scaled = (x_train - mean) / std
+            return scaler_params, train_data_scaled
 
-
-
-
-
+        elif scaler_params == None:
+            train_data_scaled = (x_train - mean) / std
+            test_data_scaled = (x_test - mean) / std
+            return scaler_params, train_data_scaled, test_data_scaled
+
+        elif scaler_params != None:
+            test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
+            return test_data_scaled
 
     except:
         print(
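standard_scaler now takes the scaler parameters as its first argument and returns them alongside the scaled data. Two caveats, reading the hunk as printed: a default value on the first parameter ahead of non-default parameters is a SyntaxError in Python, so the signature may not be exactly what ships; and comparing a NumPy array to None with `==` is ambiguous, so the combined train-plus-test path as shown would fall into the except handler. The path below is the one that works as written, assuming the intended three-positional-argument signature:

```python
from plan_bi import standard_scaler
import numpy as np

x_train = np.random.rand(100, 4)

# Fitting pass: scaler_params=None computes mean/std from x_train, and
# x_test=None selects the branch returning (scaler_params, scaled_train).
scaler_params, x_train_scaled = standard_scaler(None, x_train, None)

mean, std = scaler_params  # stored for reuse at prediction time
```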
@@ -1324,6 +1340,103 @@ def plot_evaluate(y_test, y_preds, acc_list):
     plt.tight_layout()
     plt.show()
 
+
+def manuel_balancer(x_train, y_train, target_samples_per_class):
+    """
+    Generates synthetic examples to balance classes to the specified number of examples per class.
+
+    Arguments:
+    x_train -- Input dataset (examples) - NumPy array format
+    y_train -- Class labels (one-hot encoded) - NumPy array format
+    target_samples_per_class -- Desired number of samples per class
+
+    Returns:
+    x_balanced -- Balanced input dataset (NumPy array format)
+    y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
+    """
+    start_time = time.time()
+    x_train = np.array(x_train)
+    y_train = np.array(y_train)
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    x_balanced = []
+    y_balanced = []
+
+    for class_label in range(class_count):
+        class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
+        num_samples = len(class_indices)
+
+        if num_samples > target_samples_per_class:
+
+            selected_indices = np.random.choice(class_indices, target_samples_per_class, replace=False)
+            x_balanced.append(x_train[selected_indices])
+            y_balanced.append(y_train[selected_indices])
+
+        else:
+
+            x_balanced.append(x_train[class_indices])
+            y_balanced.append(y_train[class_indices])
+
+            if num_samples < target_samples_per_class:
+
+                samples_to_add = target_samples_per_class - num_samples
+                additional_samples = np.zeros((samples_to_add, x_train.shape[1]))
+                additional_labels = np.zeros((samples_to_add, y_train.shape[1]))
+
+                for i in range(samples_to_add):
+                    uni_start_time = time.time()
+
+                    random_indices = np.random.choice(class_indices, 2, replace=False)
+                    sample1 = x_train[random_indices[0]]
+                    sample2 = x_train[random_indices[1]]
+
+
+                    synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
+
+                    additional_samples[i] = synthetic_sample
+                    additional_labels[i] = y_train[class_indices[0]]
+
+                    uni_end_time = time.time()
+                    calculating_est = round(
+                        (uni_end_time - uni_start_time) * (samples_to_add - i), 3)
+
+                    if calculating_est < 60:
+                        print('\rest......(sec):', calculating_est, '\n', end="")
+
+                    elif calculating_est > 60 and calculating_est < 3600:
+                        print('\rest......(min):', calculating_est/60, '\n', end="")
+
+                    elif calculating_est > 3600:
+                        print('\rest......(h):', calculating_est/3600, '\n', end="")
+
+                    print('Augmenting: ', class_label, '/', class_count, '...', i, "/", samples_to_add, "\n", end="")
+
+                x_balanced.append(additional_samples)
+                y_balanced.append(additional_labels)
+
+
+    EndTime = time.time()
+
+    calculating_est = round(EndTime - start_time, 2)
+
+    print(Fore.GREEN + " \nBalancing Finished with 0 ERROR\n" + Style.RESET_ALL)
+
+    if calculating_est < 60:
+        print('Total balancing time(sec): ', calculating_est)
+
+    elif calculating_est > 60 and calculating_est < 3600:
+        print('Total balancing time(min): ', calculating_est/60)
+
+    elif calculating_est > 3600:
+        print('Total balancing time(h): ', calculating_est/3600)
+
+    # Stack the balanced arrays
+    x_balanced = np.vstack(x_balanced)
+    y_balanced = np.vstack(y_balanced)
+
+    return x_balanced, y_balanced
+
 def get_weights():
 
     return 0
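manuel_balancer is the new function advertised in the 2.2.9 summary: it downsamples over-represented classes and pads under-represented ones with samples interpolated between random same-class pairs, much like synthetic_augmentation. Note the published name really is spelled manuel_balancer, so imports must use that spelling. A usage sketch on made-up data:

```python
from plan_bi import manuel_balancer
import numpy as np

# Toy imbalanced set: 30 one-hot samples of class 0, 5 of class 1.
x = np.vstack([np.random.rand(30, 4), np.random.rand(5, 4)])
y = np.zeros((35, 2))
y[:30, 0] = 1
y[30:, 1] = 1

# Class 0 is randomly downsampled to 20 samples; class 1 is topped up to 20
# with interpolated synthetic samples.
x_bal, y_bal = manuel_balancer(x, y, target_samples_per_class=20)
print(x_bal.shape, y_bal.shape)  # (40, 4) (40, 2)
```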
plan_di/__init__.py  (CHANGED)

@@ -2,4 +2,4 @@
 
 # Bu dosya, plan modülünün ana giriş noktasıdır.
 
-from .plan_di import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate
+from .plan_di import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer
plan_di/plan_di.py  (CHANGED)

@@ -122,6 +122,15 @@ def fit(
 
     layers.append('cat')
     trained_W.append(np.eye(len(class_count)))
+
+    for i in range(len(class_count)):
+
+        for j in range(len(layers)):
+
+            if layers[j] == 'fex':
+
+                trained_W[j][i,:] = normalization(trained_W[j][i,:])
+
 
     return trained_W
 
@@ -508,7 +517,7 @@ def save_model(model_name,
                weights_type,
                weights_format,
                model_path,
-               scaler,
+               scaler_params,
                W
                ):
 
@@ -523,7 +532,7 @@ def save_model(model_name,
     weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
     WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
     model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
-
+    scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
     W: Weights of the model.
 
     Returns:
@@ -573,7 +582,7 @@ def save_model(model_name,
             'WEIGHTS TYPE': weights_type,
             'WEIGHTS FORMAT': weights_format,
             'MODEL PATH': model_path,
-            'STANDARD SCALER':
+            'STANDARD SCALER': scaler_params
             }
         try:
 
@@ -720,11 +729,11 @@ def predict_model_ssd(Input, model_name, model_path):
     """
     W, df = load_model(model_name, model_path)
 
-
+    scaler_params = str(df['STANDARD SCALER'].iloc[0])
 
-    if
+    if scaler_params != None:
 
-        Input = standard_scaler(scaler_params, Input, None)
+        Input = standard_scaler(scaler_params, Input, None)
 
     layers = ['fex', 'cat']
 
@@ -752,7 +761,7 @@ def predict_model_ssd(Input, model_name, model_path):
     return neural_layer
 
 
-def predict_model_ram(Input, scaler, W):
+def predict_model_ram(Input, scaler_params, W):
 
     infopredict_model_ram = """
     Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
@@ -760,16 +769,16 @@ def predict_model_ram(Input, scaler, W):
 
     Arguments:
     Input (list or ndarray): Input data for the model (single vector or single matrix).
-
+    scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
     W (list of ndarrays): Weights of the model.
 
     Returns:
     ndarray: Output from the model.
     """
 
-    if
+    if scaler_params != None:
 
-        Input = standard_scaler(Input, None)
+        Input = standard_scaler(scaler_params, Input, None)
 
     layers = ['fex', 'cat']
 
@@ -896,7 +905,7 @@ def synthetic_augmentation(x_train, y_train):
     return np.array(x_balanced), np.array(y_balanced)
 
 
-def standard_scaler(x_train, x_test):
+def standard_scaler(scaler_params=None, x_train, x_test):
     info_standard_scaler = """
     Standardizes training and test datasets. x_test may be None.
 
@@ -907,23 +916,31 @@ def standard_scaler(x_train, x_test):
         Test data
 
     Returns:
+    list:
+        Scaler parameters: mean and std
     tuple
         Standardized training and test datasets
     """
-
-
-
-
+    try:
+
+        if scaler_params == None:
+            mean = np.mean(x_train, axis=0)
+            std = np.std(x_train, axis=0)
+            scaler_params = [mean, std]
 
-
+        if x_test == None:
 
-
-
+            train_data_scaled = (x_train - mean) / std
+            return scaler_params, train_data_scaled
 
-
-
-
-
+        elif scaler_params == None:
+            train_data_scaled = (x_train - mean) / std
+            test_data_scaled = (x_test - mean) / std
+            return scaler_params, train_data_scaled, test_data_scaled
+
+        elif scaler_params != None:
+            test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
+            return test_data_scaled
 
     except:
         print(
@@ -1296,6 +1313,102 @@ def plot_evaluate(y_test, y_preds, acc_list):
     plt.tight_layout()
     plt.show()
 
+def manuel_balancer(x_train, y_train, target_samples_per_class):
+    """
+    Generates synthetic examples to balance classes to the specified number of examples per class.
+
+    Arguments:
+    x_train -- Input dataset (examples) - NumPy array format
+    y_train -- Class labels (one-hot encoded) - NumPy array format
+    target_samples_per_class -- Desired number of samples per class
+
+    Returns:
+    x_balanced -- Balanced input dataset (NumPy array format)
+    y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
+    """
+    start_time = time.time()
+    x_train = np.array(x_train)
+    y_train = np.array(y_train)
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    x_balanced = []
+    y_balanced = []
+
+    for class_label in range(class_count):
+        class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
+        num_samples = len(class_indices)
+
+        if num_samples > target_samples_per_class:
+
+            selected_indices = np.random.choice(class_indices, target_samples_per_class, replace=False)
+            x_balanced.append(x_train[selected_indices])
+            y_balanced.append(y_train[selected_indices])
+
+        else:
+
+            x_balanced.append(x_train[class_indices])
+            y_balanced.append(y_train[class_indices])
+
+            if num_samples < target_samples_per_class:
+
+                samples_to_add = target_samples_per_class - num_samples
+                additional_samples = np.zeros((samples_to_add, x_train.shape[1]))
+                additional_labels = np.zeros((samples_to_add, y_train.shape[1]))
+
+                for i in range(samples_to_add):
+                    uni_start_time = time.time()
+
+                    random_indices = np.random.choice(class_indices, 2, replace=False)
+                    sample1 = x_train[random_indices[0]]
+                    sample2 = x_train[random_indices[1]]
+
+
+                    synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
+
+                    additional_samples[i] = synthetic_sample
+                    additional_labels[i] = y_train[class_indices[0]]
+
+                    uni_end_time = time.time()
+                    calculating_est = round(
+                        (uni_end_time - uni_start_time) * (samples_to_add - i), 3)
+
+                    if calculating_est < 60:
+                        print('\rest......(sec):', calculating_est, '\n', end="")
+
+                    elif calculating_est > 60 and calculating_est < 3600:
+                        print('\rest......(min):', calculating_est/60, '\n', end="")
+
+                    elif calculating_est > 3600:
+                        print('\rest......(h):', calculating_est/3600, '\n', end="")
+
+                    print('Augmenting: ', class_label, '/', class_count, '...', i, "/", samples_to_add, "\n", end="")
+
+                x_balanced.append(additional_samples)
+                y_balanced.append(additional_labels)
+
+
+    EndTime = time.time()
+
+    calculating_est = round(EndTime - start_time, 2)
+
+    print(Fore.GREEN + " \nBalancing Finished with 0 ERROR\n" + Style.RESET_ALL)
+
+    if calculating_est < 60:
+        print('Total balancing time(sec): ', calculating_est)
+
+    elif calculating_est > 60 and calculating_est < 3600:
+        print('Total balancing time(min): ', calculating_est/60)
+
+    elif calculating_est > 3600:
+        print('Total balancing time(h): ', calculating_est/3600)
+
+    # Stack the balanced arrays
+    x_balanced = np.vstack(x_balanced)
+    y_balanced = np.vstack(y_balanced)
+
+    return x_balanced, y_balanced
+
 def get_weights():
 
     return 0
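plan_di mirrors the plan_bi changes hunk for hunk; the only signature difference visible in this diff is that its predict_model_ram takes no activation_potential. A placeholder sketch of the di-variant call:

```python
from plan_di import predict_model_ram
import numpy as np

sample = np.random.rand(4)  # placeholder input
# trained_W: the weights list returned by plan_di's fit()

output = predict_model_ram(sample, None, trained_W)  # None: inputs not scaled
```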
{pyerualjetwork-2.2.8.dist-info → pyerualjetwork-2.2.9.dist-info}/METADATA  (CHANGED)

@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.2.8
-Summary:
+Version: 2.2.9
+Summary: Weights now noramlized, new function: manuel_balancer. And scaler_params added for scaled models.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classifcation,pruning learning artficial neural networks
pyerualjetwork-2.2.9.dist-info/RECORD  (ADDED)

@@ -0,0 +1,8 @@
+plan_bi/__init__.py,sha256=m84PX8yNU6pVlQNyWOqamkQdx_-_aPFsfvtwEnC8U_w,492
+plan_bi/plan_bi.py,sha256=ehSRrQ7_5YRKEVrHNazP8CPZSlJWVxB-lDZUTPO_Q7g,50053
+plan_di/__init__.py,sha256=cp43HHfPlIYVGFULVoiEwjFE8paMHCAgYLSic355gqs,483
+plan_di/plan_di.py,sha256=5IBktVVqKENjDm2gi8ykNOaWfxspoJuG_36FrB0m07k,47444
+pyerualjetwork-2.2.9.dist-info/METADATA,sha256=w1H4NKan5q2vMU7TjOyDVGnMqWHKf3kDFYpQiPMZPf8,319
+pyerualjetwork-2.2.9.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-2.2.9.dist-info/top_level.txt,sha256=aaXSOcnD62fbXG1x7tw4nV50Qxx9g9zDNLK7OD4BdPE,16
+pyerualjetwork-2.2.9.dist-info/RECORD,,
pyerualjetwork-2.2.8.dist-info/RECORD  (REMOVED)

@@ -1,8 +0,0 @@
-plan_bi/__init__.py,sha256=rzDe7yWvNlwDVE6xSw8Qk51itcxT6EDBj7iiOeycxMw,475
-plan_bi/plan_bi.py,sha256=n0OdGsUsbmUlP8RUw4HBpfFe8ONQo3o3qQulHyg2Lr0,45371
-plan_di/__init__.py,sha256=Omxc07PXPQZOrXBD3PJQT6sPdni6NMykyiQgKVL_IZ0,466
-plan_di/plan_di.py,sha256=EkEdGsC-QW2JFqIx9C59XbFzWnc9zCL5DB3eGa9208k,42742
-pyerualjetwork-2.2.8.dist-info/METADATA,sha256=YlFE--h2sLunB0pGGCvUqmlPtnYdb9mVYU_GsW7N6JY,325
-pyerualjetwork-2.2.8.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-2.2.8.dist-info/top_level.txt,sha256=aaXSOcnD62fbXG1x7tw4nV50Qxx9g9zDNLK7OD4BdPE,16
-pyerualjetwork-2.2.8.dist-info/RECORD,,
{pyerualjetwork-2.2.8.dist-info → pyerualjetwork-2.2.9.dist-info}/WHEEL: file without changes
{pyerualjetwork-2.2.8.dist-info → pyerualjetwork-2.2.9.dist-info}/top_level.txt: file without changes