pyerualjetwork 2.1.7__py3-none-any.whl → 2.1.8__py3-none-any.whl

This diff shows the content differences between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
plan_bi/plan_bi.py CHANGED
@@ -1144,7 +1144,7 @@ def split(X, y, test_size, random_state):
     Returns:
         tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
     """
-    # Size of the dataset
+
     num_samples = X.shape[0]
 
     if isinstance(test_size, float):
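For context, the split helper shown in this hunk returns the four subsets named in its docstring. A minimal usage sketch, assuming the function is importable from plan_bi.plan_bi as laid out in the wheel's RECORD and that y holds integer class labels (both assumptions, not confirmed by this diff):

import numpy as np
from plan_bi.plan_bi import split  # assumed import path, per the wheel's RECORD

X = np.random.rand(100, 4)               # 100 samples, 4 features
y = np.random.randint(0, 2, size=100)    # binary labels (assumed label format)

# A float test_size is treated as a fraction, per the isinstance check above.
x_train, x_test, y_train, y_test = split(X, y, test_size=0.2, random_state=42)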
@@ -1249,48 +1249,67 @@ def decode_one_hot(encoded_data):
 
 def roc_curve(y_true, y_score):
     """
-    Computes ROC curve.
+    Compute Receiver Operating Characteristic (ROC) curve.
 
-    Args:
-        y_true (numpy.ndarray): True class labels (binary: 0 or 1).
-        y_score (numpy.ndarray): Predicted probabilities for positive class.
+    Parameters:
+    y_true : array, shape = [n_samples]
+        True binary labels in range {0, 1} or {-1, 1}.
+    y_score : array, shape = [n_samples]
+        Target scores, can either be probability estimates of the positive class,
+        confidence values, or non-thresholded measure of decisions (as returned
+        by decision_function on some classifiers).
 
     Returns:
-    tuple: FPR (False Positive Rate), TPR (True Positive Rate), thresholds.
+    fpr : array, shape = [n]
+        Increasing false positive rates such that element i is the false positive rate
+        of predictions with score >= thresholds[i].
+    tpr : array, shape = [n]
+        Increasing true positive rates such that element i is the true positive rate
+        of predictions with score >= thresholds[i].
+    thresholds : array, shape = [n]
+        Decreasing thresholds on the decision function used to compute fpr and tpr.
     """
+
+    y_true = np.asarray(y_true)
+    y_score = np.asarray(y_score)
 
-    idx = np.argsort(y_score)[::-1]
-    y_score_sorted = y_score[idx]
-    y_true_sorted = y_true[idx]
+    if len(np.unique(y_true)) != 2:
+        raise ValueError("Only binary classification is supported.")
 
-    tpr = []
-    fpr = []
-    thresholds = np.linspace(0, 1, 100)
+
+    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
+    y_score = y_score[desc_score_indices]
+    y_true = y_true[desc_score_indices]
 
-    for threshold in thresholds:
-        y_pred_binary = np.where(y_score_sorted >= threshold, 1, 0)
 
-        tp = np.sum((y_true_sorted == 1) & (y_pred_binary == 1))
-        fn = np.sum((y_true_sorted == 1) & (y_pred_binary == 0))
-        tn = np.sum((y_true_sorted == 0) & (y_pred_binary == 0))
-        fp = np.sum((y_true_sorted == 0) & (y_pred_binary == 1))
+    fpr = []
+    tpr = []
+    thresholds = []
+    n_pos = np.sum(y_true)
+    n_neg = len(y_true) - n_pos
 
-        # Check for division by zero
-        if (tp + fn) == 0:
-            tpr_value = 0.0
-        else:
-            tpr_value = tp / (tp + fn)
+    tp = 0
+    fp = 0
+    prev_score = None
 
-        if (fp + tn) == 0:
-            fpr_value = 0.0
-        else:
-            fpr_value = fp / (fp + tn)
+
+    for i, score in enumerate(y_score):
+        if score != prev_score:
+            fpr.append(fp / n_neg)
+            tpr.append(tp / n_pos)
+            thresholds.append(score)
+            prev_score = score
 
-        tpr.append(tpr_value)
-        fpr.append(fpr_value)
+        if y_true[i] == 1:
+            tp += 1
+        else:
+            fp += 1
 
-    return fpr, tpr, thresholds
+    fpr.append(fp / n_neg)
+    tpr.append(tp / n_pos)
+    thresholds.append(score)
 
+    return np.array(fpr), np.array(tpr), np.array(thresholds)
 
 
 def confusion_matrix(y_true, y_pred, class_count=None):
     """
@@ -1313,7 +1332,7 @@ def confusion_matrix(y_true, y_pred, class_count=None):
         true_label = y_true[i]
         pred_label = y_pred[i]
 
-        # Ensure that true_label and pred_label are within the correct range
+
         if 0 <= true_label < class_count and 0 <= pred_label < class_count:
             confusion[true_label, pred_label] += 1
         else:
@@ -1345,7 +1364,8 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[0, 0].set_ylabel("Actual Class")
 
     # ROC Curve
-    axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve')
+    roc_auc = np.trapz(tpr, fpr)
+    axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
     axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
     axs[1, 0].set_xlim([0.0, 1.0])
     axs[1, 0].set_ylim([0.0, 1.05])
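The new roc_auc value shown in the legend is the area under the ROC curve computed with the trapezoidal rule: np.trapz(tpr, fpr) integrates tpr over fpr. A small standalone check using the curve from the sketch above:

import numpy as np

fpr = np.array([0.0, 0.0, 0.5, 0.5, 1.0])
tpr = np.array([0.0, 0.5, 0.5, 1.0, 1.0])

# np.trapz(y, x) applies the trapezoidal rule, giving the area that now
# appears in the plot legend.
roc_auc = np.trapz(tpr, fpr)
print(f'ROC curve (area = {roc_auc:.2f})')  # -> ROC curve (area = 0.75)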
@@ -1353,6 +1373,7 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[1, 0].set_ylabel('True Positive Rate')
     axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
     axs[1, 0].legend(loc="lower right")
+    axs[1, 0].legend(loc="lower right")
 
     # Precision, Recall, F1 Score, Accuracy
     metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
@@ -1409,4 +1430,3 @@ def get_acc():
 def get_pot():
 
     return 1
-
plan_di/plan_di.py CHANGED
@@ -1223,47 +1223,67 @@ def decode_one_hot(encoded_data):
 
 def roc_curve(y_true, y_score):
     """
-    Computes ROC curve.
+    Compute Receiver Operating Characteristic (ROC) curve.
 
-    Args:
-        y_true (numpy.ndarray): True class labels (binary: 0 or 1).
-        y_score (numpy.ndarray): Predicted probabilities for positive class.
+    Parameters:
+    y_true : array, shape = [n_samples]
+        True binary labels in range {0, 1} or {-1, 1}.
+    y_score : array, shape = [n_samples]
+        Target scores, can either be probability estimates of the positive class,
+        confidence values, or non-thresholded measure of decisions (as returned
+        by decision_function on some classifiers).
 
     Returns:
-    tuple: FPR (False Positive Rate), TPR (True Positive Rate), thresholds.
+    fpr : array, shape = [n]
+        Increasing false positive rates such that element i is the false positive rate
+        of predictions with score >= thresholds[i].
+    tpr : array, shape = [n]
+        Increasing true positive rates such that element i is the true positive rate
+        of predictions with score >= thresholds[i].
+    thresholds : array, shape = [n]
+        Decreasing thresholds on the decision function used to compute fpr and tpr.
    """
+
+    y_true = np.asarray(y_true)
+    y_score = np.asarray(y_score)
 
-    idx = np.argsort(y_score)[::-1]
-    y_score_sorted = y_score[idx]
-    y_true_sorted = y_true[idx]
+    if len(np.unique(y_true)) != 2:
+        raise ValueError("Only binary classification is supported.")
 
-    tpr = []
-    fpr = []
-    thresholds = np.linspace(0, 1, 100)
+
+    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
+    y_score = y_score[desc_score_indices]
+    y_true = y_true[desc_score_indices]
 
-    for threshold in thresholds:
-        y_pred_binary = np.where(y_score_sorted >= threshold, 1, 0)
 
-        tp = np.sum((y_true_sorted == 1) & (y_pred_binary == 1))
-        fn = np.sum((y_true_sorted == 1) & (y_pred_binary == 0))
-        tn = np.sum((y_true_sorted == 0) & (y_pred_binary == 0))
-        fp = np.sum((y_true_sorted == 0) & (y_pred_binary == 1))
+    fpr = []
+    tpr = []
+    thresholds = []
+    n_pos = np.sum(y_true)
+    n_neg = len(y_true) - n_pos
 
-        # Check for division by zero
-        if (tp + fn) == 0:
-            tpr_value = 0.0
-        else:
-            tpr_value = tp / (tp + fn)
+    tp = 0
+    fp = 0
+    prev_score = None
 
-        if (fp + tn) == 0:
-            fpr_value = 0.0
+
+    for i, score in enumerate(y_score):
+        if score != prev_score:
+            fpr.append(fp / n_neg)
+            tpr.append(tp / n_pos)
+            thresholds.append(score)
+            prev_score = score
+
+        if y_true[i] == 1:
+            tp += 1
         else:
-            fpr_value = fp / (fp + tn)
+            fp += 1
 
-        tpr.append(tpr_value)
-        fpr.append(fpr_value)
+    fpr.append(fp / n_neg)
+    tpr.append(tp / n_pos)
+    thresholds.append(score)
 
-    return fpr, tpr, thresholds
+    return np.array(fpr), np.array(tpr), np.array(thresholds)
 
 
 def confusion_matrix(y_true, y_pred, class_count):
@@ -1311,7 +1331,8 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[0, 0].set_ylabel("Actual Class")
 
     # ROC Curve
-    axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve')
+    roc_auc = np.trapz(tpr, fpr)
+    axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
     axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
     axs[1, 0].set_xlim([0.0, 1.0])
     axs[1, 0].set_ylim([0.0, 1.05])
@@ -1319,16 +1340,17 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[1, 0].set_ylabel('True Positive Rate')
     axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
     axs[1, 0].legend(loc="lower right")
+    axs[1, 0].legend(loc="lower right")
 
-    # Precision, Recall, F1 Score ve Accuracy bar grafiği
+    # Precision, Recall, F1 Score, Accuracy
     metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
     values = [precision, recall, f1, acc]
     colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']
 
-    # Bar grafiği oluşturma
+    #
     bars = axs[0, 1].bar(metric, values, color=colors)
 
-    # Bar grafiği üzerine değer ekleme
+
     for bar, value in zip(bars, values):
         axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
                        ha='center', va='bottom', fontsize=12, color='white', weight='bold')
@@ -1343,16 +1365,16 @@ def plot_evaluate(y_test, y_preds, acc_list):
     plt.plot(acc_list, marker='o', linestyle='-',
              color='r', label='Accuracy')
 
-    # Sabit yüksek değer için yatay çizgi
+
     plt.axhline(y=1, color='g', linestyle='--', label='Maximum Accuracy')
 
-    # Eksen ve başlık ayarları
+
     plt.xlabel('Samples')
     plt.ylabel('Accuracy')
     plt.title('Accuracy History')
     plt.legend()
 
-    # Subplot düzenleme
+
     plt.tight_layout()
     plt.show()
 
@@ -1373,4 +1395,4 @@ def get_preds():
 
 def get_acc():
 
-    return 2
+    return 2
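plan_di/plan_di.py receives the same roc_curve and plot_evaluate changes as plan_bi/plan_bi.py above. As an optional sanity check on the new curve (scikit-learn is not a dependency of this package; the comparison below is only an illustration, assumes scores without ties, and uses an import path inferred from the wheel layout), the curve points can be compared against sklearn.metrics.roc_curve:

import numpy as np
from sklearn.metrics import roc_curve as sk_roc_curve
from plan_di.plan_di import roc_curve  # assumed import path

y_true = np.array([1, 1, 0, 0])
y_score = np.array([0.8, 0.35, 0.4, 0.1])

fpr, tpr, thresholds = roc_curve(y_true, y_score)
sk_fpr, sk_tpr, sk_thr = sk_roc_curve(y_true, y_score, drop_intermediate=False)

# For untied scores the (fpr, tpr) points should coincide; the thresholds
# arrays differ (scikit-learn prepends an extra, effectively infinite threshold).
assert np.allclose(fpr, sk_fpr) and np.allclose(tpr, sk_tpr)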
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.1.7
+Version: 2.1.8
 Summary: 8 new functions: multiple_evaluate , encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
@@ -0,0 +1,8 @@
+plan_bi/__init__.py,sha256=82q8bWRYqzwMrFuViQzBg7P19i6EqdV7VYBVxuQ-LV0,517
+plan_bi/plan_bi.py,sha256=X07a6h4lsujU6ZOr5jvYJ8SHnqF-pZv0rC1NdqDIn2g,47912
+plan_di/__init__.py,sha256=Eut7tVtvQaczEejYyqfQ4eqF71j69josJcY91WN_dkk,508
+plan_di/plan_di.py,sha256=nnxUtyl2IZsriO0XPG15zFFsKnrMt0idQ9aGX4wCZ0M,44779
+pyerualjetwork-2.1.8.dist-info/METADATA,sha256=m7n0aG1mKt7J9QFBIc7A8zoTZytqVVY3aqEZuO_0Z4c,457
+pyerualjetwork-2.1.8.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-2.1.8.dist-info/top_level.txt,sha256=aaXSOcnD62fbXG1x7tw4nV50Qxx9g9zDNLK7OD4BdPE,16
+pyerualjetwork-2.1.8.dist-info/RECORD,,
@@ -1,8 +0,0 @@
-plan_bi/__init__.py,sha256=82q8bWRYqzwMrFuViQzBg7P19i6EqdV7VYBVxuQ-LV0,517
-plan_bi/plan_bi.py,sha256=m8KZHORNNPyNxglFn25oLPcUoz6H7GA6c6QdS6l79XE,47219
-plan_di/__init__.py,sha256=Eut7tVtvQaczEejYyqfQ4eqF71j69josJcY91WN_dkk,508
-plan_di/plan_di.py,sha256=vvAnYRA9NM3PXOk7gXp4aX9UHRsPZUVq50Rz2wwdc1Q,44160
-pyerualjetwork-2.1.7.dist-info/METADATA,sha256=ptRD_gNFfDWdq_EiYZgfG-vDPbq6KUiQNYNI2bUL-J4,457
-pyerualjetwork-2.1.7.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-2.1.7.dist-info/top_level.txt,sha256=aaXSOcnD62fbXG1x7tw4nV50Qxx9g9zDNLK7OD4BdPE,16
-pyerualjetwork-2.1.7.dist-info/RECORD,,