pyerualjetwork 2.6.6__tar.gz → 2.7.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/PKG-INFO +2 -2
- {pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/plan/plan.py +110 -107
- {pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/pyerualjetwork.egg-info/PKG-INFO +2 -2
- {pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/setup.py +2 -2
- {pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/plan/__init__.py +0 -0
- {pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/pyerualjetwork.egg-info/SOURCES.txt +0 -0
- {pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
- {pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/pyerualjetwork.egg-info/top_level.txt +0 -0
- {pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/setup.cfg +0 -0
{pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/PKG-INFO

```diff
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.6.6
-Summary:
+Version: 2.7.0
+Summary: Optimized for Vs Code
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classifcation,potentiation learning artficial neural networks
```
{pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/plan/plan.py

```diff
@@ -1,4 +1,3 @@
-
 # -*- coding: utf-8 -*-
 """
 Created on Tue Jun 18 23:32:16 2024
@@ -23,12 +22,13 @@ from tqdm import tqdm
 def fit(
     x_train: List[Union[int, float]],
     y_train: List[Union[int, float]], # At least two.. and one hot encoded
-    show_training,
-    show_count= None,
     val= None,
+    val_count = None,
+    activation_potentiation=None, # (float): Input activation_potentiation (optional)
     x_val= None,
     y_val= None,
-
+    show_training = None,
+    show_count= None
 ) -> str:

     infoPLAN = """
```
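For callers, the practical effect of the new `fit` signature is that the two display options lose their positional slots and everything after `y_train` gets a default. A minimal sketch of a 2.7.0-style call; the toy dataset and the `from plan import plan` import path are illustrative assumptions, not taken from the diff:

```python
from plan import plan  # assumed import path for the packaged module

x_train = [[0.1, 0.9], [0.8, 0.2], [0.3, 0.7], [0.9, 0.1]]  # toy inputs
y_train = [[1, 0], [0, 1], [1, 0], [0, 1]]                  # one-hot labels

# 2.6.6 call shape: fit(x_train, y_train, show_training, show_count=None, val=None, ...)
# 2.7.0 call shape: only the data is required; all options are keyword-friendly.
LTPW = plan.fit(x_train, y_train, show_training=None)
```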
```diff
@@ -37,33 +37,55 @@ def fit(
     Args:
         x_train (list[num]): List of input data.
         y_train (list[num]): List of target labels. (one hot encoded)
-
-        show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
-        val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None
+        val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None (optional)
         val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
-        x_val (list[num]): List of validation data. (optional) Default: x_train
-        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: y_train
         activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
+        x_val (list[num]): List of validation data. (optional) Default: 10% of x_train (auto_balanced)
+        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: 10% of y_train (auto_balanced)
+        show_training (bool, str): True, None or'final'
+        show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
     Returns:
         list([num]): (Weight matrices list, train_predictions list, Train_acc).
         error handled ?: Process status ('e')
     """
-

     if len(x_train) != len(y_train):

-        print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
+        print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN + Style.RESET_ALL)
         return 'e'
-
-    if x_val == None and y_val == None:
-
-        x_val = x_train
-        y_val = y_train

     if val == True or val == 'final':
+
+        try:
+
+            if x_val == None and y_val == None:
+
+                x_train, x_val, y_train, y_val = split(x_train, y_train,test_size=0.1,random_state=42)
+
+                x_train, y_train = auto_balancer(x_train, y_train)
+                x_val, y_val = auto_balancer(x_val, y_val)
+
+                y_train, y_val = encode_one_hot(y_train, y_val)
+
+        except:
+            pass
+
+        if val == True:
+
+            if val_count == None:
+
+                val_count = 0.1
+
+            v_iter = 0
+
+        if val == 'final':
+
+            val_count = 0.99

-
-
+        val_count = int(len(x_train) * val_count)
+        val_count_copy = val_count
+        val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
+        val_list = [] * val_count

     if show_count == None:

```
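Put in words: when `val` is enabled and no validation set is supplied, 2.7.0 now carves 10% out of `x_train` with the module's own `split`, balances both halves via `auto_balancer`, and re-encodes the labels; `val_count` then stops being a fraction and becomes a sample interval. A standalone sketch of that interval arithmetic (the 1000-sample training set is hypothetical):

```python
# Schedule derivation as in the new fit(), assuming 1000 training samples
# and the default val_count of 0.1:
n_train = 1000
val_count = 0.1
val_count = int(n_train * val_count)   # -> 100: first validation at sample 100
val_count_copy = val_count             # the interval is kept aside ...

# ... and re-added inside the training loop after each validation pass:
#     if val == True and index == val_count:
#         val_count += val_count_copy  # next passes at 200, 300, ...
```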
```diff
@@ -85,7 +107,6 @@ def fit(

     neurons = [len(class_count), len(class_count)]
     layers = ['fex']
-    val_list = []

     x_train[0] = np.array(x_train[0])
     x_train[0] = x_train[0].ravel()
@@ -98,8 +119,10 @@ def fit(

     y = decode_one_hot(y_train)

+    train_progress = tqdm(total=len(x_train),leave=False, desc="Training",ncols= 120)
+
     for index, inp in enumerate(x_train):
-
+
         progress = index / len(x_train) * 100

         inp = np.array(inp)
@@ -123,31 +146,28 @@ def fit(
             LTPW[i] = LTPW[i] + w


-        if val == True:
+        if val == True and index == val_count:

-            if show_training == True:
-                try:
-                    plt.close(fig)

-                except:
-                    pass
+            val_count += val_count_copy

-            validation_model = evaluate(x_val, y_val,
+            validation_model = evaluate(x_val, y_val, LTPW, activation_potentiation, None)

             val_acc = validation_model[get_acc()]

             val_list.append(val_acc)

-            if index == 0:
+            if v_iter == 0:

                 val_bar.update(val_acc)
-
-            if index != 0:

-
+
+            if v_iter != 0:

+                val_acc = val_acc - val_list[v_iter - 1]
                 val_bar.update(val_acc)

+            v_iter += 1

         if show_training == True:

```
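A side note on the progress-bar pattern introduced here (and reused in `evaluate` below): `tqdm.update()` adds a delta rather than setting an absolute position, so a bar created with `total=1` can track a 0-1 accuracy only if each refresh feeds it the change since the last reading. A minimal self-contained sketch with hypothetical accuracies:

```python
from tqdm import tqdm

accs = [0.50, 0.62, 0.71]                           # hypothetical validation accuracies
val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
prev = 0.0
for acc in accs:
    val_bar.update(acc - prev)                      # feed the delta, not the value
    prev = acc
val_bar.close()                                     # the bar now reads 0.71/1
```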
```diff
@@ -192,17 +212,18 @@ def fit(
                     title_info = 'Weight Matrix Of Fex Layer'


-
+

             progress_status = f"{progress:.1f}"
             fig.suptitle(suptitle_info + progress_status)
             plt.draw()
             plt.pause(0.1)

-
-            STPW = weight_identification(
+
+        STPW = weight_identification(
             len(layers) - 1, len(class_count), neurons, x_train_size)

+        train_progress.update(1)

         if show_training == 'final':

@@ -233,9 +254,8 @@ def fit(

         val_list.append(val_acc)

-        val_bar.update(val_acc)
-
-
+        val_bar.update(val_acc)
+
     return LTPW

 # FUNCTIONS -----
```
```diff
@@ -449,10 +469,10 @@ def Relu(
 def evaluate(
     x_test, # list[num]: Test input data.
     y_test, # list[num]: Test labels.
-    show_metrices, # show_metrices (bool): (True or False)
     W, # list[num]: Weight matrix list of the neural network.
-    activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional)
-    acc_bar_status
+    activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+    acc_bar_status=True, # acc_bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
+    show_metrices=None # show_metrices (bool): (True or None) (optional) Default: None
 ) -> tuple:
     infoTestModel = """
     Tests the neural network model with the given test data.
```
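The call-site impact of the `evaluate` reshuffle: `show_metrices` leaves its third positional slot and becomes a trailing keyword, while `acc_bar_status` gains a default of `True`. A hedged sketch of a 2.7.0-style call; `x_test`, `y_test`, and `W` are placeholders for real test data and the weight list returned by `fit`:

```python
# 2.6.6 call shape: evaluate(x_test, y_test, show_metrices, W, activation_potentiation, acc_bar_status)
# 2.7.0 call shape:
W_out, y_preds, acc = plan.evaluate(x_test, y_test, W,
                                    activation_potentiation=None,
                                    acc_bar_status=True,
                                    show_metrices=None)
# the unpacking matches the function's `return W, y_preds, acc`
```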
```diff
@@ -460,10 +480,10 @@ evaluate(
     Args:
         x_test (list[num]): Test input data.
         y_test (list[num]): Test labels.
-        show_metrices (bool): (True or False)
         W (list[num]): Weight matrix list of the neural network.
-        activation_potentiation (float or None): Threshold value for comparison. (optional)
-        acc_bar_status (bool): Loading bar for accuracy (True or None)
+        activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+        acc_bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
+        show_metrices (bool): (True or None) (optional) Default: None

     Returns:
         tuple: A tuple containing the predicted labels and the accuracy of the model.
@@ -479,25 +499,27 @@ evaluate(

     for i, w in enumerate(W):
         Wc[i] = np.copy(w)
-        print('\rCopying weights.....', i+1, '/', len(W), end="")

-
+
     if acc_bar_status == True:
-
+
+        test_progress = tqdm(total=len(x_test),leave=False, desc='Testing',ncols=120)
+        acc_bar = tqdm(total=1, desc="Test Accuracy", ncols=120)
+
+
     for inpIndex, Input in enumerate(x_test):
         Input = np.array(Input)
         Input = Input.ravel()
-        uni_start_time = time.time()
         neural_layer = Input
-
+
         for index, Layer in enumerate(layers):
-
+
             neural_layer = normalization(neural_layer)
-
+
             if Layer == 'fex':
                 neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
-
-
+
+
         for i, w in enumerate(Wc):
             W[i] = np.copy(w)
         RealOutput = np.argmax(y_test[inpIndex])
@@ -505,20 +527,21 @@ evaluate(
         if RealOutput == PredictedOutput:
             true += 1
         acc = true / len(y_test)
-
-
+
+
         acc_list.append(acc)
         y_preds[inpIndex] = PredictedOutput

         if acc_bar_status == True:
+            test_progress.update(1)
             if inpIndex == 0:
                 acc_bar.update(acc)

             else:
                 acc = acc - acc_list[inpIndex - 1]
                 acc_bar.update(acc)
-
-    if show_metrices == True:
+
+    if show_metrices == True:
         plot_evaluate(y_test, y_preds, acc_list)


@@ -530,7 +553,7 @@ evaluate(
        print(Fore.RED + "ERROR: Are you sure weights are loaded ? from: evaluate" +
              infoTestModel + Style.RESET_ALL)
        return 'e'
-
+
    return W, y_preds, acc


@@ -902,7 +925,7 @@ predict_model_ssd(Input, model_name, model_path):

    except:

-
+        pass


    layers = ['fex']
@@ -931,7 +954,7 @@ predict_model_ssd(Input, model_name, model_path):
    return neural_layer


-def predict_model_ram(Input, scaler_params, W, activation_potentiation=None):
+def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=None):

    infopredict_model_ram = """
    Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
```
```diff
@@ -939,17 +962,19 @@ def predict_model_ram(Input, scaler_params, W, activation_potentiation=None):

    Arguments:
        Input (list or ndarray): Input data for the model (single vector or single matrix).
-       scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
        W (list of ndarrays): Weights of the model.
-
+       scaler_params (int, float): standard scaler params list: mean,std. (optional) Default: None.
+       activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None

    Returns:
        ndarray: Output from the model.
    """
-
-
+    try:
+        if scaler_params != None:

-
+            Input = standard_scaler(None, Input, scaler_params)
+    except:
+        Input = standard_scaler(None, Input, scaler_params)


    layers = ['fex']
```
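The `predict_model_ram` swap makes scaling opt-in: `scaler_params` moves behind `W` and defaults to `None`. A hedged usage sketch; `sample`, `W`, `mean`, and `std` are placeholders, not values from the diff:

```python
# 2.6.6 required scaler_params positionally: predict_model_ram(sample, scaler_params, W)
# 2.7.0 defaults it away:
out = plan.predict_model_ram(sample, W)                             # unscaled input
out = plan.predict_model_ram(sample, W, scaler_params=[mean, std])  # standard-scaled input
```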
```diff
@@ -1005,7 +1030,7 @@ auto_balancer(x_train, y_train):
            MinCount = min(classes)

            BalancedIndices = []
-           for i in range(class_count):
+           for i in tqdm(range(class_count),leave=False,desc='Balancing Data',ncols=120):
               if len(ClassIndices[i]) > MinCount:
                   SelectedIndices = np.random.choice(
                       ClassIndices[i], MinCount, replace=False)
@@ -1022,7 +1047,7 @@ auto_balancer(x_train, y_train):
        print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
        return 'e'

-   return BalancedInputs, BalancedLabels
+   return np.array(BalancedInputs), np.array(BalancedLabels)


 def synthetic_augmentation(x_train, y_train):
```
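Worth flagging for downstream code: `auto_balancer` now hands back NumPy arrays instead of Python lists, so array attributes work directly on its results. A small sketch under that assumption:

```python
x_bal, y_bal = plan.auto_balancer(x_train, y_train)

# 2.6.6 returned plain lists; 2.7.0 returns ndarrays,
# so no wrapping in np.array(...) is needed any more:
print(x_bal.shape)
print(y_bal.mean(axis=0))   # per-class frequency of the balanced one-hot labels
```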
```diff
@@ -1052,7 +1077,7 @@ synthetic_augmentation(x_train, y_train):
    x_balanced = list(x)
    y_balanced = list(y)

-   for class_label in range(class_count):
+   for class_label in tqdm(range(class_count), leave=False, desc='Augmenting Data',ncols= 120):
       class_indices = [i for i, label in enumerate(
           y) if np.argmax(label) == class_label]
       num_samples = len(class_indices)
@@ -1092,6 +1117,15 @@ standard_scaler(x_train, x_test, scaler_params=None):
       tuple
       Standardized training and test datasets
    """
+   try:
+
+       x_train = x_train.tolist()
+       x_test = x_test.tolist()
+
+   except:
+
+       pass
+
    try:

        if scaler_params == None and x_test != None:
```
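The inserted guard normalizes input types before the original list-based branches run: NumPy arrays are converted with `.tolist()`, and the bare `except` swallows the error raised when the inputs are already lists (or `None`). The same idea as a standalone helper; the function name is mine, and the handler is narrowed to `AttributeError`, which is what `.tolist()` on a plain list actually raises:

```python
import numpy as np

def coerce_to_list(x):
    """Hypothetical helper mirroring the diff's try/except .tolist() guard."""
    try:
        return x.tolist()        # ndarray -> nested Python list
    except AttributeError:
        return x                 # lists, None, etc. pass through unchanged

print(coerce_to_list(np.eye(2)))   # [[1.0, 0.0], [0.0, 1.0]]
print(coerce_to_list([1, 2, 3]))   # [1, 2, 3], unchanged
```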
```diff
@@ -1175,7 +1209,6 @@ split(X, y, test_size, random_state):
    Returns:
        tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
    """
-   # Size of the dataset
    num_samples = X.shape[0]

    if isinstance(test_size, float):
@@ -1392,7 +1425,6 @@ plot_evaluate(y_test, y_preds, acc_list):

    # Confusion matrix
    cm = confusion_matrix(y_true, y_preds, len(Class))
-   # Subplot içinde düzenleme
    fig, axs = plt.subplots(2, 2, figsize=(16, 12))

    # Confusion Matrix
@@ -1511,16 +1543,19 @@ manuel_balancer(x_train, y_train, target_samples_per_class):
       x_balanced -- Balanced input dataset (NumPy array format)
       y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
    """
-
-
-
+    try:
+        x_train = np.array(x_train)
+        y_train = np.array(y_train)
+    except:
+        print(Fore.GREEN + "x_tarin and y_train already numpyarray." + Style.RESET_ALL)
+        pass
    classes = np.arange(y_train.shape[1])
    class_count = len(classes)

    x_balanced = []
    y_balanced = []

-   for class_label in range(class_count):
+   for class_label in tqdm(range(class_count),leave=False, desc='Augmenting Data',ncols= 120):
       class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
       num_samples = len(class_indices)
@@ -1542,8 +1577,7 @@ manuel_balancer(x_train, y_train, target_samples_per_class):
            additional_labels = np.zeros((samples_to_add, y_train.shape[1]))

            for i in range(samples_to_add):
-
-
+
                random_indices = np.random.choice(class_indices, 2, replace=False)
                sample1 = x_train[random_indices[0]]
                sample2 = x_train[random_indices[1]]
```
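The blending step itself falls between hunks, so the diff only shows that each synthetic row starts from two distinct samples of the same class. A hedged sketch of the usual interpolation that would fit here; the convex-combination form and the RNG are assumptions for illustration, not code from the package:

```python
import numpy as np

rng = np.random.default_rng(42)
sample1 = np.array([0.2, 0.8])    # placeholder members of one class
sample2 = np.array([0.6, 0.4])

alpha = rng.random()                                        # assumed mixing weight in [0, 1)
synthetic_sample = sample1 + alpha * (sample2 - sample1)    # hypothetical blend of the pair
```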
```diff
@@ -1554,41 +1588,10 @@ manuel_balancer(x_train, y_train, target_samples_per_class):
                additional_samples[i] = synthetic_sample
                additional_labels[i] = y_train[class_indices[0]]

-
-                calculating_est = round(
-                    (uni_end_time - uni_start_time) * (samples_to_add - i), 3)
-
-                if calculating_est < 60:
-                    print('\rest......(sec):', calculating_est, '\n', end="")
-
-                elif calculating_est > 60 and calculating_est < 3600:
-                    print('\rest......(min):', calculating_est/60, '\n', end="")
-
-                elif calculating_est > 3600:
-                    print('\rest......(h):', calculating_est/3600, '\n', end="")
-
-                print('Augmenting: ', class_label, '/', class_count, '...', i, "/", samples_to_add, "\n", end="")
-
+
            x_balanced.append(additional_samples)
            y_balanced.append(additional_labels)
-
-
-    EndTime = time.time()
-
-    calculating_est = round(EndTime - start_time, 2)
-
-    print(Fore.GREEN + " \nBalancing Finished with 0 ERROR\n" + Style.RESET_ALL)
-
-    if calculating_est < 60:
-        print('Total balancing time(sec): ', calculating_est)
-
-    elif calculating_est > 60 and calculating_est < 3600:
-        print('Total balancing time(min): ', calculating_est/60)
-
-    elif calculating_est > 3600:
-        print('Total balancing time(h): ', calculating_est/3600)

-    # Stack the balanced arrays
    x_balanced = np.vstack(x_balanced)
    y_balanced = np.vstack(y_balanced)

```
{pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/pyerualjetwork.egg-info/PKG-INFO

```diff
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.6.6
-Summary:
+Version: 2.7.0
+Summary: Optimized for Vs Code
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classifcation,potentiation learning artficial neural networks
```
{pyerualjetwork-2.6.6 → pyerualjetwork-2.7.0}/setup.py

```diff
@@ -5,10 +5,10 @@ from setuptools import setup, find_packages
 setup(

     name = "pyerualjetwork",
-    version = "2.6.6",
+    version = "2.7.0",
     author = "Hasan Can Beydili",
     author_email = "tchasancan@gmail.com",
-    description= "
+    description= "Optimized for Vs Code",
     packages = find_packages(),
     keywords = ["model evaluation", "classifcation", 'potentiation learning artficial neural networks'],
```
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|