pyerualjetwork 2.1.8__tar.gz → 2.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyerualjetwork-2.1.8 → pyerualjetwork-2.2.0}/PKG-INFO +1 -1
- {pyerualjetwork-2.1.8 → pyerualjetwork-2.2.0}/plan_bi/plan_bi.py +98 -35
- {pyerualjetwork-2.1.8 → pyerualjetwork-2.2.0}/plan_di/plan_di.py +89 -26
- {pyerualjetwork-2.1.8 → pyerualjetwork-2.2.0}/pyerualjetwork.egg-info/PKG-INFO +1 -1
- {pyerualjetwork-2.1.8 → pyerualjetwork-2.2.0}/setup.py +1 -1
- {pyerualjetwork-2.1.8 → pyerualjetwork-2.2.0}/plan_bi/__init__.py +0 -0
- {pyerualjetwork-2.1.8 → pyerualjetwork-2.2.0}/plan_di/__init__.py +0 -0
- {pyerualjetwork-2.1.8 → pyerualjetwork-2.2.0}/pyerualjetwork.egg-info/SOURCES.txt +0 -0
- {pyerualjetwork-2.1.8 → pyerualjetwork-2.2.0}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
- {pyerualjetwork-2.1.8 → pyerualjetwork-2.2.0}/pyerualjetwork.egg-info/top_level.txt +0 -0
- {pyerualjetwork-2.1.8 → pyerualjetwork-2.2.0}/setup.cfg +0 -0
--- pyerualjetwork-2.1.8/PKG-INFO
+++ pyerualjetwork-2.2.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.1.8
+Version: 2.2.0
 Summary: 8 new functions: multiple_evaluate , encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
--- pyerualjetwork-2.1.8/plan_bi/plan_bi.py
+++ pyerualjetwork-2.2.0/plan_bi/plan_bi.py
@@ -1171,13 +1171,14 @@ def split(X, y, test_size, random_state):
     return x_train, x_test, y_train, y_test
 
 
-def metrics(y_ts, test_preds):
+def metrics(y_ts, test_preds, average='weighted'):
     """
     Calculates precision, recall and F1 score for a classification task.
-
+
     Args:
-        y_ts (list or numpy.ndarray): True labels.
+        y_ts (list or numpy.ndarray): True labels.
         test_preds (list or numpy.ndarray): Predicted labels.
+        average (str): Type of averaging ('micro', 'macro', 'weighted').
 
     Returns:
         tuple: Precision, recall, F1 score.
@@ -1218,17 +1219,29 @@ def metrics(y_ts, test_preds):
     for c in classes:
         precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
         recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
-        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] +
-                                                  recall[c]) if (precision[c] + recall[c]) > 0 else 0
+        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
+
+    if average == 'micro':
+        precision_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
+        recall_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
+        f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0
+
+    elif average == 'macro':
+        precision_val = np.mean(list(precision.values()))
+        recall_val = np.mean(list(recall.values()))
+        f1_val = np.mean(list(f1.values()))
+
+    elif average == 'weighted':
+        weights = np.array([np.sum(y_test_d == c) for c in classes])
+        weights = weights / np.sum(weights)
+        precision_val = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
+        recall_val = np.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
+        f1_val = np.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])
 
-    micro_precision = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(
-        list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
-    micro_recall = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(
-        fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
-    micro_f1 = 2 * (micro_precision * micro_recall) / (micro_precision +
-                    micro_recall) if (micro_precision + micro_recall) > 0 else 0
+    else:
+        raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
 
-    return micro_precision, micro_recall, micro_f1
+    return precision_val, recall_val, f1_val
 
 
 def decode_one_hot(encoded_data):
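The new average parameter selects how the per-class scores are pooled. As a minimal, self-contained sketch of what the three modes compute (toy labels invented for illustration; in the package, the weighted branch takes its class supports from y_test_d, which appears to be the decoded true labels):

    import numpy as np

    y_true = np.array([0, 0, 0, 1, 1, 2])   # toy true labels
    y_pred = np.array([0, 0, 1, 1, 2, 2])   # toy predictions

    classes = np.unique(y_true)
    tp = {c: np.sum((y_pred == c) & (y_true == c)) for c in classes}
    fp = {c: np.sum((y_pred == c) & (y_true != c)) for c in classes}

    # Per-class score, guarded against zero denominators as in the diff
    precision = {c: tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0 for c in classes}

    # 'micro': pool all counts across classes first, then divide once
    micro_precision = sum(tp.values()) / (sum(tp.values()) + sum(fp.values()))

    # 'macro': unweighted mean of the per-class scores
    macro_precision = np.mean(list(precision.values()))

    # 'weighted' (the new default): mean weighted by class support
    weights = np.array([np.sum(y_true == c) for c in classes])
    weights = weights / np.sum(weights)
    weighted_precision = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])

    print(micro_precision, macro_precision, weighted_precision)  # 0.667 0.667 0.75

Recall and F1 pool the same way. In single-label classification, micro-averaging collapses precision, recall and F1 to overall accuracy, while the weighted average tracks class imbalance, which fits its choice as the new default.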
@@ -1348,13 +1361,14 @@ def plot_evaluate(y_test, y_preds, acc_list):
 
     y_true = np.array(y_true)
     y_preds = np.array(y_preds)
-    fpr, tpr, thresholds = roc_curve(y_true, y_preds)
-    precision, recall, f1 = metrics(y_test, y_preds)
     Class = np.unique(decode_one_hot(y_test))
+
+    precision, recall, f1 = metrics(y_test, y_preds)
 
-
-    cm = confusion_matrix(y_true, y_preds, len(Class))
 
+    # Confusion matrix
+    cm = confusion_matrix(y_true, y_preds, len(Class))
+    # Arrangement within the subplots
     fig, axs = plt.subplots(2, 2, figsize=(16, 12))
 
     # Confusion Matrix
@@ -1363,51 +1377,100 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[0, 0].set_xlabel("Predicted Class")
     axs[0, 0].set_ylabel("Actual Class")
 
-
-
-
-
-
-
-
-
-
-
-
+    if len(Class) == 2:
+        fpr, tpr, thresholds = roc_curve(y_true, y_preds)
+        # ROC Curve
+        roc_auc = np.trapz(tpr, fpr)
+        axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+        axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+        axs[1, 0].set_xlim([0.0, 1.0])
+        axs[1, 0].set_ylim([0.0, 1.05])
+        axs[1, 0].set_xlabel('False Positive Rate')
+        axs[1, 0].set_ylabel('True Positive Rate')
+        axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+        axs[1, 0].legend(loc="lower right")
+        axs[1, 0].legend(loc="lower right")
+    else:
+
+        for i in range(len(Class)):
+
+            y_true_copy = np.copy(y_true)
+            y_preds_copy = np.copy(y_preds)
+
+            y_true_copy[y_true_copy == i] = 0
+            y_true_copy[y_true_copy != 0] = 1
+
+            y_preds_copy[y_preds_copy == i] = 0
+            y_preds_copy[y_preds_copy != 0] = 1
+
+
+            fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)
+
+            roc_auc = np.trapz(tpr, fpr)
+            axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+            axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+            axs[1, 0].set_xlim([0.0, 1.0])
+            axs[1, 0].set_ylim([0.0, 1.05])
+            axs[1, 0].set_xlabel('False Positive Rate')
+            axs[1, 0].set_ylabel('True Positive Rate')
+            axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+            axs[1, 0].legend(loc="lower right")
+            axs[1, 0].legend(loc="lower right")
+
+
+    """
+    accuracy_per_class = []
+
+    for cls in Class:
+        correct = np.sum((y_true == cls) & (y_preds == cls))
+        total = np.sum(y_true == cls)
+        accuracy_cls = correct / total if total > 0 else 0.0
+        accuracy_per_class.append(accuracy_cls)
+
+    axs[2, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
+    axs[2, 0].set_xlabel('Class')
+    axs[2, 0].set_ylabel('Accuracy')
+    axs[2, 0].set_title('Class-wise Accuracy')
+    axs[2, 0].set_xticks(Class)
+    axs[2, 0].grid(True)
+    """
+
+
+
 
     # Precision, Recall, F1 Score, Accuracy
     metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
     values = [precision, recall, f1, acc]
     colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']
 
-
+    #
     bars = axs[0, 1].bar(metric, values, color=colors)
 
-
+
     for bar, value in zip(bars, values):
         axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
                        ha='center', va='bottom', fontsize=12, color='white', weight='bold')
 
-    axs[0, 1].set_ylim(0, 1)
+    axs[0, 1].set_ylim(0, 1)  # Limit the y-axis between 0 and 1
     axs[0, 1].set_xlabel('Metrics')
     axs[0, 1].set_ylabel('Score')
-    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy')
+    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Wighted)')
     axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
 
-
+    # Accuracy
     plt.plot(acc_list, marker='o', linestyle='-',
              color='r', label='Accuracy')
 
-
+
     plt.axhline(y=1, color='g', linestyle='--', label='Maximum Accuracy')
 
-
+
     plt.xlabel('Samples')
     plt.ylabel('Accuracy')
     plt.title('Accuracy History')
     plt.legend()
 
-
+
     plt.tight_layout()
     plt.show()
 
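The multiclass branch above draws one ROC curve per class by collapsing the labels to a binary problem. A minimal sketch of that binarization step (illustrative arrays, not package code):

    import numpy as np

    def binarize_for_roc(y, target):
        # Mirrors the y_*_copy transformation in the diff: the target class
        # becomes 0 and every remaining nonzero label becomes 1. Note that
        # labels that were already 0 stay 0, so for target != 0 they end up
        # on the same side of the split as the target class.
        out = np.copy(y)
        out[out == target] = 0
        out[out != 0] = 1
        return out

    y_true = np.array([0, 1, 2, 1, 0, 2])
    for i in np.unique(y_true):
        print(i, binarize_for_roc(y_true, i))

The plotted AUC then comes from np.trapz(tpr, fpr), a trapezoidal-rule integral over the sampled curve. A more conventional one-vs-rest split would map the target class to 1 and everything else to 0, keeping the positive label distinct for every class.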
--- pyerualjetwork-2.1.8/plan_di/plan_di.py
+++ pyerualjetwork-2.2.0/plan_di/plan_di.py
@@ -1145,13 +1145,14 @@ def split(X, y, test_size, random_state):
     return x_train, x_test, y_train, y_test
 
 
-def metrics(y_ts, test_preds):
+def metrics(y_ts, test_preds, average='weighted'):
     """
     Calculates precision, recall and F1 score for a classification task.
-
+
    Args:
-        y_ts (list or numpy.ndarray): True labels.
+        y_ts (list or numpy.ndarray): True labels.
         test_preds (list or numpy.ndarray): Predicted labels.
+        average (str): Type of averaging ('micro', 'macro', 'weighted').
 
     Returns:
         tuple: Precision, recall, F1 score.
@@ -1192,17 +1193,29 @@ def metrics(y_ts, test_preds):
     for c in classes:
         precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
         recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
-        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] +
-                                                  recall[c]) if (precision[c] + recall[c]) > 0 else 0
+        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
 
-    micro_precision = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(
-        list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
-    micro_recall = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(
-        fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
-    micro_f1 = 2 * (micro_precision * micro_recall) / (micro_precision +
-                    micro_recall) if (micro_precision + micro_recall) > 0 else 0
+    if average == 'micro':
+        precision_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
+        recall_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
+        f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0
 
-    return micro_precision, micro_recall, micro_f1
+    elif average == 'macro':
+        precision_val = np.mean(list(precision.values()))
+        recall_val = np.mean(list(recall.values()))
+        f1_val = np.mean(list(f1.values()))
+
+    elif average == 'weighted':
+        weights = np.array([np.sum(y_test_d == c) for c in classes])
+        weights = weights / np.sum(weights)
+        precision_val = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
+        recall_val = np.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
+        f1_val = np.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])
+
+    else:
+        raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
+
+    return precision_val, recall_val, f1_val
 
 
 def decode_one_hot(encoded_data):
@@ -1315,9 +1328,10 @@ def plot_evaluate(y_test, y_preds, acc_list):
 
     y_true = np.array(y_true)
     y_preds = np.array(y_preds)
-    fpr, tpr, thresholds = roc_curve(y_true, y_preds)
-    precision, recall, f1 = metrics(y_test, y_preds)
     Class = np.unique(decode_one_hot(y_test))
+
+    precision, recall, f1 = metrics(y_test, y_preds)
+
 
     # Confusion matrix
     cm = confusion_matrix(y_true, y_preds, len(Class))
@@ -1330,17 +1344,66 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[0, 0].set_xlabel("Predicted Class")
     axs[0, 0].set_ylabel("Actual Class")
 
-
-
-
-
-
-
-
-
-
-
-
+    if len(Class) == 2:
+        fpr, tpr, thresholds = roc_curve(y_true, y_preds)
+        # ROC Curve
+        roc_auc = np.trapz(tpr, fpr)
+        axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+        axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+        axs[1, 0].set_xlim([0.0, 1.0])
+        axs[1, 0].set_ylim([0.0, 1.05])
+        axs[1, 0].set_xlabel('False Positive Rate')
+        axs[1, 0].set_ylabel('True Positive Rate')
+        axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+        axs[1, 0].legend(loc="lower right")
+        axs[1, 0].legend(loc="lower right")
+    else:
+
+        for i in range(len(Class)):
+
+            y_true_copy = np.copy(y_true)
+            y_preds_copy = np.copy(y_preds)
+
+            y_true_copy[y_true_copy == i] = 0
+            y_true_copy[y_true_copy != 0] = 1
+
+            y_preds_copy[y_preds_copy == i] = 0
+            y_preds_copy[y_preds_copy != 0] = 1
+
+
+            fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)
+
+            roc_auc = np.trapz(tpr, fpr)
+            axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+            axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+            axs[1, 0].set_xlim([0.0, 1.0])
+            axs[1, 0].set_ylim([0.0, 1.05])
+            axs[1, 0].set_xlabel('False Positive Rate')
+            axs[1, 0].set_ylabel('True Positive Rate')
+            axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+            axs[1, 0].legend(loc="lower right")
+            axs[1, 0].legend(loc="lower right")
+
+
+    """
+    accuracy_per_class = []
+
+    for cls in Class:
+        correct = np.sum((y_true == cls) & (y_preds == cls))
+        total = np.sum(y_true == cls)
+        accuracy_cls = correct / total if total > 0 else 0.0
+        accuracy_per_class.append(accuracy_cls)
+
+    axs[2, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
+    axs[2, 0].set_xlabel('Class')
+    axs[2, 0].set_ylabel('Accuracy')
+    axs[2, 0].set_title('Class-wise Accuracy')
+    axs[2, 0].set_xticks(Class)
+    axs[2, 0].grid(True)
+    """
+
+
+
 
     # Precision, Recall, F1 Score, Accuracy
     metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
@@ -1358,7 +1421,7 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[0, 1].set_ylim(0, 1)  # Limit the y-axis between 0 and 1
     axs[0, 1].set_xlabel('Metrics')
     axs[0, 1].set_ylabel('Score')
-    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy')
+    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Wighted)')
     axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
 
     # Accuracy
--- pyerualjetwork-2.1.8/pyerualjetwork.egg-info/PKG-INFO
+++ pyerualjetwork-2.2.0/pyerualjetwork.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.1.8
+Version: 2.2.0
 Summary: 8 new functions: multiple_evaluate , encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
|
@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
|
|
5
5
|
setup(
|
6
6
|
|
7
7
|
name = "pyerualjetwork",
|
8
|
-
version = "2.
|
8
|
+
version = "2.2.0",
|
9
9
|
author = "Hasan Can Beydili",
|
10
10
|
author_email = "tchasancan@gmail.com",
|
11
11
|
description= " 8 new functions: multiple_evaluate , encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)",
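For anyone tracking this bump, the release can be pinned in the usual way (assuming installation from PyPI):

    pip install pyerualjetwork==2.2.0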