pyerualjetwork 2.1.9__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff represents the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
plan_bi/plan_bi.py CHANGED
@@ -1171,13 +1171,14 @@ def split(X, y, test_size, random_state):
     return x_train, x_test, y_train, y_test
 
 
-def metrics(y_ts, test_preds):
+def metrics(y_ts, test_preds, average='weighted'):
    """
    Calculates precision, recall and F1 score for a classification task.
-    
+
    Args:
-        y_test (list or numpy.ndarray): True labels.
+        y_ts (list or numpy.ndarray): True labels.
         test_preds (list or numpy.ndarray): Predicted labels.
+        average (str): Type of averaging ('micro', 'macro', 'weighted').
 
    Returns:
        tuple: Precision, recall, F1 score.
@@ -1218,17 +1219,29 @@ def metrics(y_ts, test_preds):
     for c in classes:
         precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
         recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
-        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] +
-                                                  recall[c]) if (precision[c] + recall[c]) > 0 else 0
+        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
+
+    if average == 'micro':
+        precision_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
+        recall_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
+        f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0
+
+    elif average == 'macro':
+        precision_val = np.mean(list(precision.values()))
+        recall_val = np.mean(list(recall.values()))
+        f1_val = np.mean(list(f1.values()))
+
+    elif average == 'weighted':
+        weights = np.array([np.sum(y_test_d == c) for c in classes])
+        weights = weights / np.sum(weights)
+        precision_val = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
+        recall_val = np.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
+        f1_val = np.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])
 
-    micro_precision = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(
-        list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
-    micro_recall = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(
-        fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
-    micro_f1 = 2 * (micro_precision * micro_recall) / (micro_precision +
-                                                       micro_recall) if (micro_precision + micro_recall) > 0 else 0
+    else:
+        raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
 
-    return micro_precision, micro_recall, micro_f1
+    return precision_val, recall_val, f1_val
 
 
 def decode_one_hot(encoded_data):
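Taken together, the two metrics hunks swap the hard-coded micro averaging for a scikit-learn-style average switch, with 'weighted' as the new default. As a minimal standalone sketch of what the three modes compute, using made-up labels rather than anything from the package:

import numpy as np

# hypothetical labels; class 0 has the largest support on purpose
y_ts = np.array([0, 0, 0, 1, 1, 2])
test_preds = np.array([0, 0, 1, 1, 1, 0])

classes = np.unique(y_ts)
tp = {c: np.sum((y_ts == c) & (test_preds == c)) for c in classes}
fp = {c: np.sum((y_ts != c) & (test_preds == c)) for c in classes}

# per-class precision, guarded against empty denominators as in the diff
precision = {c: tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
             for c in classes}

# 'micro': pool the counts over all classes, then divide once
micro = sum(tp.values()) / (sum(tp.values()) + sum(fp.values()))

# 'macro': unweighted mean of the per-class scores
macro = np.mean(list(precision.values()))

# 'weighted': per-class scores averaged by class support, as in the new code path
weights = np.array([np.sum(y_ts == c) for c in classes]) / len(y_ts)
weighted = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])

print(micro, macro, weighted)  # 0.667, 0.444, 0.556

These inputs give 2/3, 4/9 and 5/9 respectively; the modes diverge whenever class support is unbalanced, which is why the change of default alters reported scores.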
@@ -1378,21 +1391,49 @@ def plot_evaluate(y_test, y_preds, acc_list):
         axs[1, 0].legend(loc="lower right")
         axs[1, 0].legend(loc="lower right")
     else:
-        accuracy_per_class = []
-
-        for cls in Class:
-            correct = np.sum((y_true == cls) & (y_preds == cls))
-            total = np.sum(y_true == cls)
-            accuracy_cls = correct / total if total > 0 else 0.0
-            accuracy_per_class.append(accuracy_cls)
-
-        axs[1, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
-        axs[1, 0].set_xlabel('Class')
-        axs[1, 0].set_ylabel('Accuracy')
-        axs[1, 0].set_title('Class-wise Accuracy')
-        axs[1, 0].set_xticks(Class)
-        axs[1, 0].grid(True)
 
+        for i in range(len(Class)):
+
+            y_true_copy = np.copy(y_true)
+            y_preds_copy = np.copy(y_preds)
+
+            y_true_copy[y_true_copy == i] = 0
+            y_true_copy[y_true_copy != 0] = 1
+
+            y_preds_copy[y_preds_copy == i] = 0
+            y_preds_copy[y_preds_copy != 0] = 1
+
+
+            fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)
+
+            roc_auc = np.trapz(tpr, fpr)
+            axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+            axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+            axs[1, 0].set_xlim([0.0, 1.0])
+            axs[1, 0].set_ylim([0.0, 1.05])
+            axs[1, 0].set_xlabel('False Positive Rate')
+            axs[1, 0].set_ylabel('True Positive Rate')
+            axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+            axs[1, 0].legend(loc="lower right")
+            axs[1, 0].legend(loc="lower right")
+
+
+        """
+        accuracy_per_class = []
+
+        for cls in Class:
+            correct = np.sum((y_true == cls) & (y_preds == cls))
+            total = np.sum(y_true == cls)
+            accuracy_cls = correct / total if total > 0 else 0.0
+            accuracy_per_class.append(accuracy_cls)
+
+        axs[2, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
+        axs[2, 0].set_xlabel('Class')
+        axs[2, 0].set_ylabel('Accuracy')
+        axs[2, 0].set_title('Class-wise Accuracy')
+        axs[2, 0].set_xticks(Class)
+        axs[2, 0].grid(True)
+        """
 
 
 
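Both modules replace the class-wise accuracy bars (now parked in a string literal) with one-vs-rest ROC curves, each drawn into the same axs[1, 0] panel. Worth noting when reading the hunk: it maps the class under test to 0 and everything else to 1, the inverse of the usual convention in which the class of interest becomes the positive label. A minimal sketch of the conventional orientation, with made-up hard labels and the single-point AUC that the same np.trapz call yields:

import numpy as np

y_true = np.array([0, 1, 2, 1, 0, 2])   # hypothetical true labels
y_preds = np.array([0, 1, 2, 0, 0, 1])  # hypothetical hard predictions

for cls in np.unique(y_true):
    # class of interest -> 1, everything else -> 0
    t = (y_true == cls).astype(int)
    p = (y_preds == cls).astype(int)

    tp = np.sum((t == 1) & (p == 1))
    fp = np.sum((t == 0) & (p == 1))
    fn = np.sum((t == 1) & (p == 0))
    tn = np.sum((t == 0) & (p == 0))

    tpr = tp / (tp + fn) if (tp + fn) > 0 else 0.0
    fpr = fp / (fp + tn) if (fp + tn) > 0 else 0.0

    # hard labels yield a single operating point; the trapezoid through
    # (0, 0), (fpr, tpr), (1, 1) is the AUC np.trapz computes for it
    auc = np.trapz([0.0, tpr, 1.0], [0.0, fpr, 1.0])
    print(f"class {cls}: TPR={tpr:.2f} FPR={fpr:.2f} AUC={auc:.2f}")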
@@ -1413,7 +1454,7 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[0, 1].set_ylim(0, 1)  # limit the y-axis to the range 0 to 1
     axs[0, 1].set_xlabel('Metrics')
     axs[0, 1].set_ylabel('Score')
-    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy')
+    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Wighted)')
     axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
 
     # Accuracy
plan_di/plan_di.py CHANGED
@@ -1145,13 +1145,14 @@ def split(X, y, test_size, random_state):
     return x_train, x_test, y_train, y_test
 
 
-def metrics(y_ts, test_preds):
+def metrics(y_ts, test_preds, average='weighted'):
    """
    Calculates precision, recall and F1 score for a classification task.
-    
+
    Args:
-        y_test (list or numpy.ndarray): True labels.
+        y_ts (list or numpy.ndarray): True labels.
         test_preds (list or numpy.ndarray): Predicted labels.
+        average (str): Type of averaging ('micro', 'macro', 'weighted').
 
    Returns:
        tuple: Precision, recall, F1 score.
@@ -1192,17 +1193,29 @@ def metrics(y_ts, test_preds):
     for c in classes:
         precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
         recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
-        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] +
-                                                  recall[c]) if (precision[c] + recall[c]) > 0 else 0
+        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
+
+    if average == 'micro':
+        precision_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
+        recall_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
+        f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0
+
+    elif average == 'macro':
+        precision_val = np.mean(list(precision.values()))
+        recall_val = np.mean(list(recall.values()))
+        f1_val = np.mean(list(f1.values()))
+
+    elif average == 'weighted':
+        weights = np.array([np.sum(y_test_d == c) for c in classes])
+        weights = weights / np.sum(weights)
+        precision_val = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
+        recall_val = np.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
+        f1_val = np.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])
 
-    micro_precision = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(
-        list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
-    micro_recall = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(
-        fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
-    micro_f1 = 2 * (micro_precision * micro_recall) / (micro_precision +
-                                                       micro_recall) if (micro_precision + micro_recall) > 0 else 0
+    else:
+        raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
 
-    return micro_precision, micro_recall, micro_f1
+    return precision_val, recall_val, f1_val
 
 
 def decode_one_hot(encoded_data):
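The plan_di hunks mirror the plan_bi changes above line for line. A quick way to sanity-check the three averaging modes is to compare them with scikit-learn's reference implementation; scikit-learn is an assumption here, as it is not a dependency declared anywhere in this diff:

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

y_true = np.array([0, 0, 1, 1, 2, 2, 2])  # hypothetical labels
y_pred = np.array([0, 1, 1, 1, 2, 0, 2])

for avg in ('micro', 'macro', 'weighted'):
    p, r, f, _ = precision_recall_fscore_support(
        y_true, y_pred, average=avg, zero_division=0)
    print(f'{avg:>8}: precision={p:.3f} recall={r:.3f} f1={f:.3f}')

For single-label predictions, micro precision, recall and F1 all collapse to plain accuracy (every misclassification is one class's false positive and another's false negative), so the 2.1.9 metrics effectively reported accuracy three times.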
@@ -1345,21 +1358,49 @@ def plot_evaluate(y_test, y_preds, acc_list):
         axs[1, 0].legend(loc="lower right")
         axs[1, 0].legend(loc="lower right")
     else:
-        accuracy_per_class = []
-
-        for cls in Class:
-            correct = np.sum((y_true == cls) & (y_preds == cls))
-            total = np.sum(y_true == cls)
-            accuracy_cls = correct / total if total > 0 else 0.0
-            accuracy_per_class.append(accuracy_cls)
-
-        axs[1, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
-        axs[1, 0].set_xlabel('Class')
-        axs[1, 0].set_ylabel('Accuracy')
-        axs[1, 0].set_title('Class-wise Accuracy')
-        axs[1, 0].set_xticks(Class)
-        axs[1, 0].grid(True)
 
+        for i in range(len(Class)):
+
+            y_true_copy = np.copy(y_true)
+            y_preds_copy = np.copy(y_preds)
+
+            y_true_copy[y_true_copy == i] = 0
+            y_true_copy[y_true_copy != 0] = 1
+
+            y_preds_copy[y_preds_copy == i] = 0
+            y_preds_copy[y_preds_copy != 0] = 1
+
+
+            fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)
+
+            roc_auc = np.trapz(tpr, fpr)
+            axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+            axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+            axs[1, 0].set_xlim([0.0, 1.0])
+            axs[1, 0].set_ylim([0.0, 1.05])
+            axs[1, 0].set_xlabel('False Positive Rate')
+            axs[1, 0].set_ylabel('True Positive Rate')
+            axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+            axs[1, 0].legend(loc="lower right")
+            axs[1, 0].legend(loc="lower right")
+
+
+        """
+        accuracy_per_class = []
+
+        for cls in Class:
+            correct = np.sum((y_true == cls) & (y_preds == cls))
+            total = np.sum(y_true == cls)
+            accuracy_cls = correct / total if total > 0 else 0.0
+            accuracy_per_class.append(accuracy_cls)
+
+        axs[2, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
+        axs[2, 0].set_xlabel('Class')
+        axs[2, 0].set_ylabel('Accuracy')
+        axs[2, 0].set_title('Class-wise Accuracy')
+        axs[2, 0].set_xticks(Class)
+        axs[2, 0].grid(True)
+        """
 
 
 
@@ -1380,7 +1421,7 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[0, 1].set_ylim(0, 1)  # limit the y-axis to the range 0 to 1
     axs[0, 1].set_xlabel('Metrics')
     axs[0, 1].set_ylabel('Score')
-    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy')
+    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Wighted)')
    axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
 
     # Accuracy
{pyerualjetwork-2.1.9.dist-info → pyerualjetwork-2.2.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.1.9
+Version: 2.2.0
 Summary: 8 new functions: multiple_evaluate , encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
pyerualjetwork-2.2.0.dist-info/RECORD ADDED
@@ -0,0 +1,8 @@
+plan_bi/__init__.py,sha256=82q8bWRYqzwMrFuViQzBg7P19i6EqdV7VYBVxuQ-LV0,517
+plan_bi/plan_bi.py,sha256=tlxrr8-HJ1XodT0v5I6jS6IkrZUQEMKKlMY1tRihy-E,50588
+plan_di/__init__.py,sha256=Eut7tVtvQaczEejYyqfQ4eqF71j69josJcY91WN_dkk,508
+plan_di/plan_di.py,sha256=UKSp5YaTdG8jKAGrkSmwJl3gA81aU4MSJ6WyQEZatXs,47338
+pyerualjetwork-2.2.0.dist-info/METADATA,sha256=iVpSNQ8pjr_EduOzO7xEEqc0WGJwtFCyrKIg4SvlW1U,457
+pyerualjetwork-2.2.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-2.2.0.dist-info/top_level.txt,sha256=aaXSOcnD62fbXG1x7tw4nV50Qxx9g9zDNLK7OD4BdPE,16
+pyerualjetwork-2.2.0.dist-info/RECORD,,
pyerualjetwork-2.1.9.dist-info/RECORD DELETED
@@ -1,8 +0,0 @@
-plan_bi/__init__.py,sha256=82q8bWRYqzwMrFuViQzBg7P19i6EqdV7VYBVxuQ-LV0,517
-plan_bi/plan_bi.py,sha256=5lzUpnRV5hKhSsErqojShC9jtJ0Uy83Two90jmKOvZk,48701
-plan_di/__init__.py,sha256=Eut7tVtvQaczEejYyqfQ4eqF71j69josJcY91WN_dkk,508
-plan_di/plan_di.py,sha256=LYfuCW2KkzP-rACl4jt9zTry96-L-nSOJSf1tS9pXr0,45451
-pyerualjetwork-2.1.9.dist-info/METADATA,sha256=QrFkTmYHTP1coZUcEoB8iKxsCXe-njXxheq_6d4JBJc,457
-pyerualjetwork-2.1.9.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-2.1.9.dist-info/top_level.txt,sha256=aaXSOcnD62fbXG1x7tw4nV50Qxx9g9zDNLK7OD4BdPE,16
-pyerualjetwork-2.1.9.dist-info/RECORD,,
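The RECORD rewrite is mechanical: only the hashes and sizes of the two changed modules plus the version-bearing dist-info paths differ. Each line has the form path,sha256=<digest>,<size in bytes>, where the digest is unpadded urlsafe base64 per the wheel spec. A small sketch for reproducing an entry's hash from an unpacked wheel:

import base64
import hashlib

def record_hash(path):
    # RECORD stores sha256 digests as unpadded urlsafe base64
    with open(path, 'rb') as f:
        digest = hashlib.sha256(f.read()).digest()
    return 'sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')

# e.g. record_hash('plan_di/plan_di.py') should reproduce the value recorded above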