pyerualjetwork 2.1.9__py3-none-any.whl → 2.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plan_bi/plan_bi.py +74 -33
- plan_di/plan_di.py +76 -65
- {pyerualjetwork-2.1.9.dist-info → pyerualjetwork-2.2.1.dist-info}/METADATA +1 -1
- pyerualjetwork-2.2.1.dist-info/RECORD +8 -0
- pyerualjetwork-2.1.9.dist-info/RECORD +0 -8
- {pyerualjetwork-2.1.9.dist-info → pyerualjetwork-2.2.1.dist-info}/WHEEL +0 -0
- {pyerualjetwork-2.1.9.dist-info → pyerualjetwork-2.2.1.dist-info}/top_level.txt +0 -0
plan_bi/plan_bi.py
CHANGED
@@ -95,8 +95,6 @@ def fit(

             if Layer == 'fex':
                 W[Lindex] = fex(neural_layer, W[Lindex], activation_potential, Piece[Windex], True, y)
-            elif Layer == 'cat':
-                W[Lindex] = cat(neural_layer, W[Lindex], True, y)


         for i, w in enumerate(W):
@@ -172,6 +170,8 @@ def weight_identification(
     for w in range(ws):
         W[w + 1] = np.ones((neurons[w + 1],neurons[w]))
     W[layer_count] = np.ones((class_count,neurons[layer_count - 1]))
+    W[len(W) - 1] = np.eye(class_count)
+
     return W

 def synaptic_pruning(
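The functional change in weight_identification is that the last matrix in the returned weight list is now overwritten with an identity matrix, so each class starts with a fixed one-to-one mapping in the final 'cat' step. A minimal sketch of the resulting initialization (an illustration of the hunk above, not the package source; the shape of W[0] is an assumption):

    import numpy as np

    # Illustrative sketch only: mirrors what the hunk above does to the final
    # weight matrix of the list returned by weight_identification.
    def init_weights_sketch(layer_count, class_count, neurons, x_train_size):
        W = [None] * (layer_count + 1)
        W[0] = np.ones((neurons[0], x_train_size))
        for w in range(layer_count - 1):
            W[w + 1] = np.ones((neurons[w + 1], neurons[w]))
        W[layer_count] = np.ones((class_count, neurons[layer_count - 1]))
        W[len(W) - 1] = np.eye(class_count)   # new in 2.2.1: last matrix becomes an identity
        return W

    W = init_weights_sketch(layer_count=1, class_count=3, neurons=[4], x_train_size=4)
    print(W[-1])   # 3x3 identity matrix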
@@ -471,7 +471,7 @@ def evaluate(
                 if layers[index] == 'fex':
                     neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)
                 if layers[index] == 'cat':
-                    neural_layer =
+                    neural_layer = np.dot(W[index], neural_layer)

             for i, w in enumerate(Wc):
                 W[i] = np.copy(w)
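The same one-line substitution recurs in multiple_evaluate, predict_model_ssd and predict_model_ram below: the 'cat' layer is now evaluated as a plain matrix-vector product instead of a call into the removed cat() helper. A hedged sketch of what the added line computes (names and values are illustrative only, not the module's API):

    import numpy as np

    # Minimal sketch of the 'cat' step after this change: class scores are just
    # W[index] @ neural_layer. With W[index] initialized to an identity matrix
    # (see the weight_identification hunk above), the per-class activations pass
    # straight through.
    class_count = 3
    W_cat = np.eye(class_count)            # assumption: identity-initialized 'cat' weights
    neural_layer = np.array([0.2, 1.5, 0.7])

    output = np.dot(W_cat, neural_layer)   # same operation as the added diff line
    print(output)                          # [0.2 1.5 0.7]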
@@ -594,7 +594,7 @@ def multiple_evaluate(
                 if layers[index] == 'fex':
                     neural_layer = fex(neural_layer, W[index], activation_potentials[m], None, False, None)
                 if layers[index] == 'cat':
-                    neural_layer =
+                    neural_layer = np.dot(W[index], neural_layer)

             output_layer += neural_layer

@@ -914,7 +914,7 @@ def predict_model_ssd(Input, model_name, model_path):
            if layers[index] == 'fex':
                neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)
            if layers[index] == 'cat':
-               neural_layer =
+               neural_layer = np.dot(W[index], neural_layer)
    except:
        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" + infopredict_model_ssd + Style.RESET_ALL)
        return 'e'
@@ -958,7 +958,7 @@ def predict_model_ram(Input, activation_potential, scaler, W):
            if layers[index] == 'fex':
                neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)
            if layers[index] == 'cat':
-               neural_layer =
+               neural_layer = np.dot(W[index], neural_layer)

    except:
        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + infopredict_model_ram + Style.RESET_ALL)
@@ -1171,13 +1171,14 @@ def split(X, y, test_size, random_state):
    return x_train, x_test, y_train, y_test


-def metrics(y_ts, test_preds):
+def metrics(y_ts, test_preds, average='weighted'):
    """
    Calculates precision, recall and F1 score for a classification task.
-
+
    Args:
-
+        y_ts (list or numpy.ndarray): True labels.
        test_preds (list or numpy.ndarray): Predicted labels.
+        average (str): Type of averaging ('micro', 'macro', 'weighted').

    Returns:
        tuple: Precision, recall, F1 score.
@@ -1218,17 +1219,29 @@ def metrics(y_ts, test_preds):
    for c in classes:
        precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
        recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
-       f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] +
-
+       f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
+
+   if average == 'micro':
+       precision_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
+       recall_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
+       f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0
+
+   elif average == 'macro':
+       precision_val = np.mean(list(precision.values()))
+       recall_val = np.mean(list(recall.values()))
+       f1_val = np.mean(list(f1.values()))

-
-
-
-
-
-
+   elif average == 'weighted':
+       weights = np.array([np.sum(y_test_d == c) for c in classes])
+       weights = weights / np.sum(weights)
+       precision_val = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
+       recall_val = np.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
+       f1_val = np.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])

-
+   else:
+       raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
+
+   return precision_val, recall_val, f1_val


 def decode_one_hot(encoded_data):
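metrics() now reduces the per-class precision/recall/F1 dictionaries with one of three averaging schemes selected by the new average argument ('weighted' by default; the weighted branch uses y_test_d, which appears to be the decoded form of y_ts computed earlier in the function). A self-contained sketch of how the three schemes differ on invented per-class counts (illustration only, not the package source):

    import numpy as np

    # Toy per-class counts, invented for illustration.
    classes = [0, 1, 2]
    tp = {0: 8, 1: 5, 2: 2}
    fp = {0: 2, 1: 1, 2: 4}
    fn = {0: 1, 1: 3, 2: 2}
    support = {0: 9, 1: 8, 2: 4}   # true samples per class (tp + fn)

    precision = {c: tp[c] / (tp[c] + fp[c]) for c in classes}
    recall = {c: tp[c] / (tp[c] + fn[c]) for c in classes}
    f1 = {c: 2 * precision[c] * recall[c] / (precision[c] + recall[c]) for c in classes}

    # 'micro': pool the counts first, then compute a single precision/recall/F1.
    micro_p = sum(tp.values()) / (sum(tp.values()) + sum(fp.values()))
    micro_r = sum(tp.values()) / (sum(tp.values()) + sum(fn.values()))
    micro_f1 = 2 * micro_p * micro_r / (micro_p + micro_r)

    # 'macro': unweighted mean of the per-class scores.
    macro_f1 = np.mean(list(f1.values()))

    # 'weighted' (the new default): per-class scores weighted by class support.
    weights = np.array([support[c] for c in classes], dtype=float)
    weights /= weights.sum()
    weighted_f1 = float(np.sum(weights * np.array([f1[c] for c in classes])))

    print(micro_f1, macro_f1, weighted_f1)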
@@ -1378,21 +1391,49 @@ def plot_evaluate(y_test, y_preds, acc_list):
        axs[1, 0].legend(loc="lower right")
        axs[1, 0].legend(loc="lower right")
    else:
-       accuracy_per_class = []
-
-       for cls in Class:
-           correct = np.sum((y_true == cls) & (y_preds == cls))
-           total = np.sum(y_true == cls)
-           accuracy_cls = correct / total if total > 0 else 0.0
-           accuracy_per_class.append(accuracy_cls)
-
-       axs[1, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
-       axs[1, 0].set_xlabel('Class')
-       axs[1, 0].set_ylabel('Accuracy')
-       axs[1, 0].set_title('Class-wise Accuracy')
-       axs[1, 0].set_xticks(Class)
-       axs[1, 0].grid(True)

+       for i in range(len(Class)):
+
+           y_true_copy = np.copy(y_true)
+           y_preds_copy = np.copy(y_preds)
+
+           y_true_copy[y_true_copy == i] = 0
+           y_true_copy[y_true_copy != 0] = 1
+
+           y_preds_copy[y_preds_copy == i] = 0
+           y_preds_copy[y_preds_copy != 0] = 1
+
+
+           fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)
+
+           roc_auc = np.trapz(tpr, fpr)
+           axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+           axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+           axs[1, 0].set_xlim([0.0, 1.0])
+           axs[1, 0].set_ylim([0.0, 1.05])
+           axs[1, 0].set_xlabel('False Positive Rate')
+           axs[1, 0].set_ylabel('True Positive Rate')
+           axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+           axs[1, 0].legend(loc="lower right")
+           axs[1, 0].legend(loc="lower right")
+
+
+       """
+       accuracy_per_class = []
+
+       for cls in Class:
+           correct = np.sum((y_true == cls) & (y_preds == cls))
+           total = np.sum(y_true == cls)
+           accuracy_cls = correct / total if total > 0 else 0.0
+           accuracy_per_class.append(accuracy_cls)
+
+       axs[2, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
+       axs[2, 0].set_xlabel('Class')
+       axs[2, 0].set_ylabel('Accuracy')
+       axs[2, 0].set_title('Class-wise Accuracy')
+       axs[2, 0].set_xticks(Class)
+       axs[2, 0].grid(True)
+       """



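In plot_evaluate, the class-wise accuracy bars in the axs[1, 0] panel give way to per-class ROC curves: labels and predictions are binarized one-vs-rest, fed to the module's roc_curve helper, and the area under the curve is approximated with np.trapz. A minimal sketch of that pattern, using sklearn.metrics.roc_curve as a stand-in for the module's own helper and mapping the class of interest to 1 (the hunk above maps it to 0), on invented labels:

    import numpy as np
    from sklearn.metrics import roc_curve  # stand-in for the module's roc_curve helper

    y_true = np.array([0, 1, 2, 1, 0, 2, 1, 0])   # invented labels for illustration
    y_pred = np.array([0, 1, 1, 1, 0, 2, 0, 2])

    for cls in np.unique(y_true):
        # One-vs-rest binarization: the current class becomes the positive label (1).
        y_true_bin = (y_true == cls).astype(int)
        y_pred_bin = (y_pred == cls).astype(int)

        fpr, tpr, _ = roc_curve(y_true_bin, y_pred_bin)
        roc_auc = np.trapz(tpr, fpr)   # trapezoidal AUC estimate as in the diff (np.trapezoid on NumPy 2.x)
        print(f"class {cls}: AUC ~ {roc_auc:.2f}")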
@@ -1413,7 +1454,7 @@ def plot_evaluate(y_test, y_preds, acc_list):
    axs[0, 1].set_ylim(0, 1) # Y eksenini 0 ile 1 arasında sınırla
    axs[0, 1].set_xlabel('Metrics')
    axs[0, 1].set_ylabel('Score')
-   axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy')
+   axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Wighted)')
    axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)

    # Accuracy
plan_di/plan_di.py
CHANGED
@@ -58,6 +58,7 @@ def fit(

    W = weight_identification(
        len(layers) - 1, len(class_count), neurons, x_train_size)
+
    Divides, Piece = synaptic_dividing(len(class_count), W)
    trained_W = [1] * len(W)
    print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
@@ -94,8 +95,6 @@ def fit(
            y = np.argmax(y_train[index])
            if Layer == 'fex':
                W[Lindex] = fex(neural_layer, W[Lindex], True, y)
-           elif Layer == 'cat':
-               W[Lindex] = cat(neural_layer, W[Lindex], True, y)

        for i, w in enumerate(W):
            trained_W[i] = trained_W[i] + w
@@ -165,6 +164,8 @@ def weight_identification(
    for w in range(ws):
        W[w + 1] = np.ones((neurons[w + 1], neurons[w]))
    W[layer_count] = np.ones((class_count, neurons[layer_count - 1]))
+   W[len(W) - 1] = np.eye(class_count)
+
    return W


@@ -302,37 +303,6 @@ def fex(
    return neural_layer


-def cat(
-    Input,  # list[num]: Input data.
-    w,  # list[num]: Weight matrix of the neural network.
-    # (bool): Flag indicating if the function is called during training (True or False).
-    is_training,
-    Class
-) -> tuple:
-    """
-    Applies categorization process to the input data using synaptic pruning if specified.
-
-    Args:
-        Input (list[num]): Input data.
-        w (list[num]): Weight matrix of the neural network.
-        is_training (bool): Flag indicating if the function is called during training (True or False).
-        Class (int): if is during training then which class(label) ? is isnt then put None.
-    Returns:
-        tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
-    """
-
-    if is_training == True:
-
-        w[Class, Class] += 1
-
-        return w
-
-    else:
-
-        neural_layer = np.dot(w, Input)
-
-        return neural_layer
-

 def normalization(
    Input  # num: Input data to be normalized.
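For context on this removal: cat() only incremented the diagonal entry w[Class, Class] during training and returned np.dot(w, Input) at inference. With the last weight matrix now fixed to np.eye(class_count) (weight_identification hunk above) and the forward passes below calling np.dot directly, the helper is redundant. A tiny illustration of what a diagonal 'cat' matrix does to per-class scores (invented values, simplified, not package code):

    import numpy as np

    scores = np.array([0.4, 2.0, 1.1])     # made-up per-class activations

    w_identity = np.eye(3)                 # 2.2.1: the 'cat' matrix stays an identity
    w_diagonal = np.diag([3.0, 5.0, 2.0])  # simplified stand-in for a diagonal grown by w[Class, Class] += 1

    print(np.dot(w_identity, scores))      # [0.4 2.  1.1]  scores pass through unchanged
    print(np.dot(w_diagonal, scores))      # [1.2 10.  2.2] each class score scaled by its diagonal weight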
@@ -449,7 +419,7 @@ def evaluate(
                if Layer == 'fex':
                    neural_layer = fex(neural_layer, W[index], False, None)
                elif Layer == 'cat':
-                   neural_layer =
+                   neural_layer = np.dot(W[index], neural_layer)

            for i, w in enumerate(Wc):
                W[i] = np.copy(w)
@@ -571,7 +541,7 @@ def multiple_evaluate(
                if Layer == 'fex':
                    neural_layer = fex(neural_layer, W[index], False, None)
                elif Layer == 'cat':
-                   neural_layer =
+                   neural_layer = np.dot(W[index], neural_layer)

            output_layer += neural_layer

@@ -887,7 +857,7 @@ def predict_model_ssd(Input, model_name, model_path):
            if Layer == 'fex':
                neural_layer = fex(neural_layer, W[index], False, None)
            elif Layer == 'cat':
-               neural_layer =
+               neural_layer = np.dot(W[index], neural_layer)
    except:
        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
              infopredict_model_ssd + Style.RESET_ALL)
@@ -932,7 +902,7 @@ def predict_model_ram(Input, scaler, W):
            if Layer == 'fex':
                neural_layer = fex(neural_layer, W[index], False, None)
            elif Layer == 'cat':
-               neural_layer =
+               neural_layer = np.dot(W[index], neural_layer)

    except:
        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
@@ -1145,13 +1115,14 @@ def split(X, y, test_size, random_state):
    return x_train, x_test, y_train, y_test


-def metrics(y_ts, test_preds):
+def metrics(y_ts, test_preds, average='weighted'):
    """
    Calculates precision, recall and F1 score for a classification task.
-
+
    Args:
-
+        y_ts (list or numpy.ndarray): True labels.
        test_preds (list or numpy.ndarray): Predicted labels.
+        average (str): Type of averaging ('micro', 'macro', 'weighted').

    Returns:
        tuple: Precision, recall, F1 score.
@@ -1192,17 +1163,29 @@ def metrics(y_ts, test_preds):
    for c in classes:
        precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
        recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
-       f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] +
-
+       f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
+
+   if average == 'micro':
+       precision_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
+       recall_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
+       f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0

-
-
-
-
-       micro_f1 = 2 * (micro_precision * micro_recall) / (micro_precision +
-                                                          micro_recall) if (micro_precision + micro_recall) > 0 else 0
+   elif average == 'macro':
+       precision_val = np.mean(list(precision.values()))
+       recall_val = np.mean(list(recall.values()))
+       f1_val = np.mean(list(f1.values()))

-
+   elif average == 'weighted':
+       weights = np.array([np.sum(y_test_d == c) for c in classes])
+       weights = weights / np.sum(weights)
+       precision_val = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
+       recall_val = np.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
+       f1_val = np.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])
+
+   else:
+       raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
+
+   return precision_val, recall_val, f1_val


 def decode_one_hot(encoded_data):
@@ -1345,21 +1328,49 @@ def plot_evaluate(y_test, y_preds, acc_list):
        axs[1, 0].legend(loc="lower right")
        axs[1, 0].legend(loc="lower right")
    else:
-       accuracy_per_class = []
-
-       for cls in Class:
-           correct = np.sum((y_true == cls) & (y_preds == cls))
-           total = np.sum(y_true == cls)
-           accuracy_cls = correct / total if total > 0 else 0.0
-           accuracy_per_class.append(accuracy_cls)
-
-       axs[1, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
-       axs[1, 0].set_xlabel('Class')
-       axs[1, 0].set_ylabel('Accuracy')
-       axs[1, 0].set_title('Class-wise Accuracy')
-       axs[1, 0].set_xticks(Class)
-       axs[1, 0].grid(True)

+       for i in range(len(Class)):
+
+           y_true_copy = np.copy(y_true)
+           y_preds_copy = np.copy(y_preds)
+
+           y_true_copy[y_true_copy == i] = 0
+           y_true_copy[y_true_copy != 0] = 1
+
+           y_preds_copy[y_preds_copy == i] = 0
+           y_preds_copy[y_preds_copy != 0] = 1
+
+
+           fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)
+
+           roc_auc = np.trapz(tpr, fpr)
+           axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+           axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+           axs[1, 0].set_xlim([0.0, 1.0])
+           axs[1, 0].set_ylim([0.0, 1.05])
+           axs[1, 0].set_xlabel('False Positive Rate')
+           axs[1, 0].set_ylabel('True Positive Rate')
+           axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+           axs[1, 0].legend(loc="lower right")
+           axs[1, 0].legend(loc="lower right")
+
+
+       """
+       accuracy_per_class = []
+
+       for cls in Class:
+           correct = np.sum((y_true == cls) & (y_preds == cls))
+           total = np.sum(y_true == cls)
+           accuracy_cls = correct / total if total > 0 else 0.0
+           accuracy_per_class.append(accuracy_cls)
+
+       axs[2, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
+       axs[2, 0].set_xlabel('Class')
+       axs[2, 0].set_ylabel('Accuracy')
+       axs[2, 0].set_title('Class-wise Accuracy')
+       axs[2, 0].set_xticks(Class)
+       axs[2, 0].grid(True)
+       """



@@ -1380,7 +1391,7 @@ def plot_evaluate(y_test, y_preds, acc_list):
    axs[0, 1].set_ylim(0, 1) # Y eksenini 0 ile 1 arasında sınırla
    axs[0, 1].set_xlabel('Metrics')
    axs[0, 1].set_ylabel('Score')
-   axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy')
+   axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Wighted)')
    axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)

    # Accuracy
@@ -1417,4 +1428,4 @@ def get_preds():

 def get_acc():

-    return 2
+    return 2
{pyerualjetwork-2.1.9.dist-info → pyerualjetwork-2.2.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.1
+Version: 2.2.1
 Summary: 8 new functions: multiple_evaluate , encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
pyerualjetwork-2.2.1.dist-info/RECORD
ADDED

@@ -0,0 +1,8 @@
+plan_bi/__init__.py,sha256=82q8bWRYqzwMrFuViQzBg7P19i6EqdV7VYBVxuQ-LV0,517
+plan_bi/plan_bi.py,sha256=fNJK07y9JDVLU0GORD_RHKBjTUnVzb5ciYcQFpyiSvc,50490
+plan_di/__init__.py,sha256=Eut7tVtvQaczEejYyqfQ4eqF71j69josJcY91WN_dkk,508
+plan_di/plan_di.py,sha256=gKY0yCzX1nNNwdRwGOfmaLcvuQwfg1VrWD5oieVXz08,46265
+pyerualjetwork-2.2.1.dist-info/METADATA,sha256=0P5kjlorW8wIUW_p7b90xAvyFYM6cprTalkAHOZ3IMo,457
+pyerualjetwork-2.2.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-2.2.1.dist-info/top_level.txt,sha256=aaXSOcnD62fbXG1x7tw4nV50Qxx9g9zDNLK7OD4BdPE,16
+pyerualjetwork-2.2.1.dist-info/RECORD,,
pyerualjetwork-2.1.9.dist-info/RECORD
DELETED

@@ -1,8 +0,0 @@
-plan_bi/__init__.py,sha256=82q8bWRYqzwMrFuViQzBg7P19i6EqdV7VYBVxuQ-LV0,517
-plan_bi/plan_bi.py,sha256=5lzUpnRV5hKhSsErqojShC9jtJ0Uy83Two90jmKOvZk,48701
-plan_di/__init__.py,sha256=Eut7tVtvQaczEejYyqfQ4eqF71j69josJcY91WN_dkk,508
-plan_di/plan_di.py,sha256=LYfuCW2KkzP-rACl4jt9zTry96-L-nSOJSf1tS9pXr0,45451
-pyerualjetwork-2.1.9.dist-info/METADATA,sha256=QrFkTmYHTP1coZUcEoB8iKxsCXe-njXxheq_6d4JBJc,457
-pyerualjetwork-2.1.9.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-2.1.9.dist-info/top_level.txt,sha256=aaXSOcnD62fbXG1x7tw4nV50Qxx9g9zDNLK7OD4BdPE,16
-pyerualjetwork-2.1.9.dist-info/RECORD,,
{pyerualjetwork-2.1.9.dist-info → pyerualjetwork-2.2.1.dist-info}/WHEEL
File without changes

{pyerualjetwork-2.1.9.dist-info → pyerualjetwork-2.2.1.dist-info}/top_level.txt
File without changes