pyerualjetwork 2.1.6__py3-none-any.whl → 2.1.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plan_bi/plan_bi.py +54 -34
- plan_di/plan_di.py +62 -48
- {pyerualjetwork-2.1.6.dist-info → pyerualjetwork-2.1.8.dist-info}/METADATA +1 -1
- pyerualjetwork-2.1.8.dist-info/RECORD +8 -0
- pyerualjetwork-2.1.6.dist-info/RECORD +0 -8
- {pyerualjetwork-2.1.6.dist-info → pyerualjetwork-2.1.8.dist-info}/WHEEL +0 -0
- {pyerualjetwork-2.1.6.dist-info → pyerualjetwork-2.1.8.dist-info}/top_level.txt +0 -0
plan_bi/plan_bi.py
CHANGED
```diff
@@ -1144,7 +1144,7 @@ def split(X, y, test_size, random_state):
     Returns:
         tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
     """
-
+
     num_samples = X.shape[0]
 
     if isinstance(test_size, float):
```
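For readers skimming the diff, a quick usage sketch of the `split` helper touched above may help. The signature comes from the hunk header and the return order from the docstring; the import path and the shape comments are assumptions, not taken from the package docs.

```python
import numpy as np
from plan_bi import split   # import path is an assumption about the module layout

X = np.random.rand(100, 4)            # 100 samples, 4 features
y = np.random.randint(0, 2, 100)      # binary labels

# A float test_size is treated as a fraction (per the isinstance(test_size, float) branch above).
x_train, x_test, y_train, y_test = split(X, y, test_size=0.2, random_state=42)
print(x_train.shape, x_test.shape)    # expected: (80, 4) (20, 4)
```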
```diff
@@ -1249,48 +1249,67 @@ def decode_one_hot(encoded_data):
 
 def roc_curve(y_true, y_score):
     """
-
+    Compute Receiver Operating Characteristic (ROC) curve.
 
-
-
-
+    Parameters:
+    y_true : array, shape = [n_samples]
+        True binary labels in range {0, 1} or {-1, 1}.
+    y_score : array, shape = [n_samples]
+        Target scores, can either be probability estimates of the positive class,
+        confidence values, or non-thresholded measure of decisions (as returned
+        by decision_function on some classifiers).
 
     Returns:
-
+    fpr : array, shape = [n]
+        Increasing false positive rates such that element i is the false positive rate
+        of predictions with score >= thresholds[i].
+    tpr : array, shape = [n]
+        Increasing true positive rates such that element i is the true positive rate
+        of predictions with score >= thresholds[i].
+    thresholds : array, shape = [n]
+        Decreasing thresholds on the decision function used to compute fpr and tpr.
     """
+
+    y_true = np.asarray(y_true)
+    y_score = np.asarray(y_score)
 
-
-
-    y_true_sorted = y_true[idx]
+    if len(np.unique(y_true)) != 2:
+        raise ValueError("Only binary classification is supported.")
 
-
-
-
+
+    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
+    y_score = y_score[desc_score_indices]
+    y_true = y_true[desc_score_indices]
 
-    for threshold in thresholds:
-        y_pred_binary = np.where(y_score_sorted >= threshold, 1, 0)
 
-
-
-
-
+    fpr = []
+    tpr = []
+    thresholds = []
+    n_pos = np.sum(y_true)
+    n_neg = len(y_true) - n_pos
 
-
-
-
-    else:
-        tpr_value = tp / (tp + fn)
+    tp = 0
+    fp = 0
+    prev_score = None
 
-
-
-
-
+
+    for i, score in enumerate(y_score):
+        if score != prev_score:
+            fpr.append(fp / n_neg)
+            tpr.append(tp / n_pos)
+            thresholds.append(score)
+            prev_score = score
 
-
-
+        if y_true[i] == 1:
+            tp += 1
+        else:
+            fp += 1
 
-
+    fpr.append(fp / n_neg)
+    tpr.append(tp / n_pos)
+    thresholds.append(score)
 
+    return np.array(fpr), np.array(tpr), np.array(thresholds)
 
 def confusion_matrix(y_true, y_pred, class_count=None):
     """
```
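The new `roc_curve` replaces the old per-threshold loop with a single descending-score sweep that keeps running TP/FP counts. The standalone sketch below mirrors that logic on a tiny hand-checkable input so the emitted points can be verified; `roc_curve_sketch` is an illustration written for this note, not the library function itself.

```python
import numpy as np

def roc_curve_sketch(y_true, y_score):
    # Same idea as the added implementation: sort scores descending, then sweep
    # once, emitting an (fpr, tpr, threshold) point whenever the score changes.
    y_true = np.asarray(y_true)
    y_score = np.asarray(y_score)
    order = np.argsort(y_score, kind="mergesort")[::-1]
    y_true, y_score = y_true[order], y_score[order]

    n_pos = int(np.sum(y_true))
    n_neg = len(y_true) - n_pos
    fpr, tpr, thresholds = [], [], []
    tp = fp = 0
    prev_score = None
    for label, score in zip(y_true, y_score):
        if score != prev_score:
            fpr.append(fp / n_neg)
            tpr.append(tp / n_pos)
            thresholds.append(score)
            prev_score = score
        if label == 1:
            tp += 1
        else:
            fp += 1
    # Final point after the last sample has been counted.
    fpr.append(fp / n_neg)
    tpr.append(tp / n_pos)
    thresholds.append(score)
    return np.array(fpr), np.array(tpr), np.array(thresholds)

fpr, tpr, thr = roc_curve_sketch([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])
print(fpr)  # [0.  0.  0.5 0.5 1. ]
print(tpr)  # [0.  0.5 0.5 1.  1. ]
print(thr)  # thresholds: 0.8, 0.4, 0.35, 0.1, 0.1
```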
```diff
@@ -1313,7 +1332,7 @@ def confusion_matrix(y_true, y_pred, class_count=None):
         true_label = y_true[i]
         pred_label = y_pred[i]
 
-
+
         if 0 <= true_label < class_count and 0 <= pred_label < class_count:
             confusion[true_label, pred_label] += 1
         else:
```
```diff
@@ -1331,7 +1350,7 @@ def plot_evaluate(y_test, y_preds, acc_list):
     y_preds = np.array(y_preds)
     fpr, tpr, thresholds = roc_curve(y_true, y_preds)
     precision, recall, f1 = metrics(y_test, y_preds)
-    Class = np.unique(y_test)
+    Class = np.unique(decode_one_hot(y_test))
 
 
     cm = confusion_matrix(y_true, y_preds, len(Class))
```
```diff
@@ -1345,7 +1364,8 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[0, 0].set_ylabel("Actual Class")
 
     # ROC Curve
-
+    roc_auc = np.trapz(tpr, fpr)
+    axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
     axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
     axs[1, 0].set_xlim([0.0, 1.0])
     axs[1, 0].set_ylim([0.0, 1.05])
```
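The legend label added above derives the AUC from the curve itself with `np.trapz`. A minimal check using the arrays from the earlier sketch (values chosen purely for illustration):

```python
import numpy as np

fpr = np.array([0.0, 0.0, 0.5, 0.5, 1.0])
tpr = np.array([0.0, 0.5, 0.5, 1.0, 1.0])

# np.trapz(y, x) integrates tpr over fpr; fpr must be non-decreasing for a meaningful area.
roc_auc = np.trapz(tpr, fpr)
print(f'ROC curve (area = {roc_auc:.2f})')   # ROC curve (area = 0.75)
```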
```diff
@@ -1353,6 +1373,7 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[1, 0].set_ylabel('True Positive Rate')
     axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
     axs[1, 0].legend(loc="lower right")
+    axs[1, 0].legend(loc="lower right")
 
     # Precision, Recall, F1 Score, Accuracy
     metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
```
```diff
@@ -1409,4 +1430,3 @@ def get_acc():
 def get_pot():
 
     return 1
-
```
plan_di/plan_di.py
CHANGED
```diff
@@ -1223,75 +1223,87 @@ def decode_one_hot(encoded_data):
 
 def roc_curve(y_true, y_score):
     """
-
+    Compute Receiver Operating Characteristic (ROC) curve.
 
-
-
-
+    Parameters:
+    y_true : array, shape = [n_samples]
+        True binary labels in range {0, 1} or {-1, 1}.
+    y_score : array, shape = [n_samples]
+        Target scores, can either be probability estimates of the positive class,
+        confidence values, or non-thresholded measure of decisions (as returned
+        by decision_function on some classifiers).
 
     Returns:
-
+    fpr : array, shape = [n]
+        Increasing false positive rates such that element i is the false positive rate
+        of predictions with score >= thresholds[i].
+    tpr : array, shape = [n]
+        Increasing true positive rates such that element i is the true positive rate
+        of predictions with score >= thresholds[i].
+    thresholds : array, shape = [n]
+        Decreasing thresholds on the decision function used to compute fpr and tpr.
     """
+
+    y_true = np.asarray(y_true)
+    y_score = np.asarray(y_score)
 
-
-
-    y_true_sorted = y_true[idx]
+    if len(np.unique(y_true)) != 2:
+        raise ValueError("Only binary classification is supported.")
 
-
-
-
+
+    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
+    y_score = y_score[desc_score_indices]
+    y_true = y_true[desc_score_indices]
 
-    for threshold in thresholds:
-        y_pred_binary = np.where(y_score_sorted >= threshold, 1, 0)
 
-
-
-
-
+    fpr = []
+    tpr = []
+    thresholds = []
+    n_pos = np.sum(y_true)
+    n_neg = len(y_true) - n_pos
 
-
-
-
-    else:
-        tpr_value = tp / (tp + fn)
+    tp = 0
+    fp = 0
+    prev_score = None
 
-
-
+
+    for i, score in enumerate(y_score):
+        if score != prev_score:
+            fpr.append(fp / n_neg)
+            tpr.append(tp / n_pos)
+            thresholds.append(score)
+            prev_score = score
+
+        if y_true[i] == 1:
+            tp += 1
         else:
-
+            fp += 1
 
-
-
+    fpr.append(fp / n_neg)
+    tpr.append(tp / n_pos)
+    thresholds.append(score)
 
-    return fpr, tpr, thresholds
+    return np.array(fpr), np.array(tpr), np.array(thresholds)
 
 
-def confusion_matrix(y_true, y_pred, class_count=None):
+def confusion_matrix(y_true, y_pred, class_count):
     """
     Computes confusion matrix.
 
     Args:
         y_true (numpy.ndarray): True class labels (1D array).
         y_pred (numpy.ndarray): Predicted class labels (1D array).
-
+        num_classes (int): Number of classes.
 
     Returns:
         numpy.ndarray: Confusion matrix of shape (num_classes, num_classes).
     """
-    if class_count is None:
-        class_count = len(np.unique(np.concatenate((y_true, y_pred))))
-
     confusion = np.zeros((class_count, class_count), dtype=int)
 
     for i in range(len(y_true)):
         true_label = y_true[i]
         pred_label = y_pred[i]
-
-        # Ensure that true_label and pred_label are within the correct range
-        if 0 <= true_label < class_count and 0 <= pred_label < class_count:
-            confusion[true_label, pred_label] += 1
-        else:
-            print(f"Warning: Ignoring out of range label - True: {true_label}, Predicted: {pred_label}")
+        confusion[true_label, pred_label] += 1
 
     return confusion
 
```
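Note that plan_di's `confusion_matrix` now takes `class_count` as a required argument and drops the old range check and warning, while plan_bi keeps its `class_count=None` default. A standalone sketch of the tightened behaviour (written for this note, not the library code):

```python
import numpy as np

def confusion_matrix_sketch(y_true, y_pred, class_count):
    confusion = np.zeros((class_count, class_count), dtype=int)
    for t, p in zip(y_true, y_pred):
        # Labels >= class_count now raise IndexError instead of being skipped with a warning.
        confusion[t, p] += 1
    return confusion

print(confusion_matrix_sketch([0, 1, 2, 2], [0, 2, 2, 2], class_count=3))
# [[1 0 0]
#  [0 0 1]
#  [0 0 2]]
```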
```diff
@@ -1305,7 +1317,7 @@ def plot_evaluate(y_test, y_preds, acc_list):
     y_preds = np.array(y_preds)
     fpr, tpr, thresholds = roc_curve(y_true, y_preds)
     precision, recall, f1 = metrics(y_test, y_preds)
-    Class = np.unique(y_test)
+    Class = np.unique(decode_one_hot(y_test))
 
     # Confusion matrix
     cm = confusion_matrix(y_true, y_preds, len(Class))
```
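Both files switch `Class` from `np.unique(y_test)` to `np.unique(decode_one_hot(y_test))`. The point of the change is that `np.unique` on a one-hot matrix only returns `[0, 1]`, not the class ids; the snippet below illustrates this with an argmax-style decode standing in for the library's `decode_one_hot` (an assumption about its behaviour).

```python
import numpy as np

y_test_one_hot = np.array([[1, 0, 0],
                           [0, 0, 1],
                           [0, 1, 0]])

print(np.unique(y_test_one_hot))              # [0 1]    -> not the class ids
decoded = np.argmax(y_test_one_hot, axis=1)   # stand-in for decode_one_hot
print(np.unique(decoded))                     # [0 1 2]  -> actual class ids
```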
```diff
@@ -1319,7 +1331,8 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[0, 0].set_ylabel("Actual Class")
 
     # ROC Curve
-
+    roc_auc = np.trapz(tpr, fpr)
+    axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
     axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
     axs[1, 0].set_xlim([0.0, 1.0])
     axs[1, 0].set_ylim([0.0, 1.05])
```
```diff
@@ -1327,16 +1340,17 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[1, 0].set_ylabel('True Positive Rate')
     axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
     axs[1, 0].legend(loc="lower right")
+    axs[1, 0].legend(loc="lower right")
 
-    # Precision, Recall, F1 Score
+    # Precision, Recall, F1 Score, Accuracy
     metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
     values = [precision, recall, f1, acc]
     colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']
 
-    #
+    #
     bars = axs[0, 1].bar(metric, values, color=colors)
 
-
+
     for bar, value in zip(bars, values):
         axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
                        ha='center', va='bottom', fontsize=12, color='white', weight='bold')
```
```diff
@@ -1351,16 +1365,16 @@ def plot_evaluate(y_test, y_preds, acc_list):
     plt.plot(acc_list, marker='o', linestyle='-',
              color='r', label='Accuracy')
 
-
+
     plt.axhline(y=1, color='g', linestyle='--', label='Maximum Accuracy')
 
-
+
     plt.xlabel('Samples')
     plt.ylabel('Accuracy')
     plt.title('Accuracy History')
     plt.legend()
 
-
+
     plt.tight_layout()
     plt.show()
 
```
```diff
@@ -1381,4 +1395,4 @@ def get_preds():
 
 def get_acc():
 
-    return 2
+    return 2
```
{pyerualjetwork-2.1.6.dist-info → pyerualjetwork-2.1.8.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.1.6
+Version: 2.1.8
 Summary: 8 new functions: multiple_evaluate , encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
```
pyerualjetwork-2.1.8.dist-info/RECORD
```diff
@@ -0,0 +1,8 @@
+plan_bi/__init__.py,sha256=82q8bWRYqzwMrFuViQzBg7P19i6EqdV7VYBVxuQ-LV0,517
+plan_bi/plan_bi.py,sha256=X07a6h4lsujU6ZOr5jvYJ8SHnqF-pZv0rC1NdqDIn2g,47912
+plan_di/__init__.py,sha256=Eut7tVtvQaczEejYyqfQ4eqF71j69josJcY91WN_dkk,508
+plan_di/plan_di.py,sha256=nnxUtyl2IZsriO0XPG15zFFsKnrMt0idQ9aGX4wCZ0M,44779
+pyerualjetwork-2.1.8.dist-info/METADATA,sha256=m7n0aG1mKt7J9QFBIc7A8zoTZytqVVY3aqEZuO_0Z4c,457
+pyerualjetwork-2.1.8.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-2.1.8.dist-info/top_level.txt,sha256=aaXSOcnD62fbXG1x7tw4nV50Qxx9g9zDNLK7OD4BdPE,16
+pyerualjetwork-2.1.8.dist-info/RECORD,,
```
pyerualjetwork-2.1.6.dist-info/RECORD
```diff
@@ -1,8 +0,0 @@
-plan_bi/__init__.py,sha256=82q8bWRYqzwMrFuViQzBg7P19i6EqdV7VYBVxuQ-LV0,517
-plan_bi/plan_bi.py,sha256=mfBNSNqoTtXKvPZNCZuNYkVJnkOmg1JpM2FsLhE0nak,47203
-plan_di/__init__.py,sha256=Eut7tVtvQaczEejYyqfQ4eqF71j69josJcY91WN_dkk,508
-plan_di/plan_di.py,sha256=fiaYet8F6N1ad3xIvzLwgUZwridxuc-i03cZifoyJfc,44581
-pyerualjetwork-2.1.6.dist-info/METADATA,sha256=T2M8jWa5LYTkdXcTrzmoHOKVeGdVdgrf_1UXaomsOw4,457
-pyerualjetwork-2.1.6.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-2.1.6.dist-info/top_level.txt,sha256=aaXSOcnD62fbXG1x7tw4nV50Qxx9g9zDNLK7OD4BdPE,16
-pyerualjetwork-2.1.6.dist-info/RECORD,,
```
{pyerualjetwork-2.1.6.dist-info → pyerualjetwork-2.1.8.dist-info}/WHEEL
File without changes

{pyerualjetwork-2.1.6.dist-info → pyerualjetwork-2.1.8.dist-info}/top_level.txt
File without changes